Project Submission Team Members -

  1. Manasvi Jindal 03101192022 AI/ML IGDTUW
  2. Monya Mehta 03401192022 AI/ML IGDTUW

Loading the data¶

In [ ]:
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).

Importing the necessary libraries

In [ ]:
!pip install tf_explain
Requirement already satisfied: tf_explain in /usr/local/lib/python3.10/dist-packages (0.3.1)

Importing the libraries¶

In [ ]:
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.utils import img_to_array
import tensorflow as tf
import os
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import Dropout
from keras.layers import Concatenate
from keras.layers import MaxPooling2D
from keras.layers import Conv2DTranspose
from keras import Model
from sklearn.model_selection import train_test_split
from keras import backend as K
from keras.losses import BinaryCrossentropy
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras.models import Model
from keras.layers import Layer
from keras.layers import Dropout
from keras.layers import UpSampling2D
from keras.layers import concatenate
from keras.layers import Add
from keras.layers import Multiply
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.callbacks import Callback
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from tf_explain.core.grad_cam import GradCAM
from keras.metrics import MeanIoU
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
import pandas as pd
import seaborn
from numpy.random import randint
from keras.layers import BatchNormalization
from keras.models import Sequential
from keras.layers import LeakyReLU
from keras.optimizers import Adam
from keras.layers import Flatten
from keras.layers import Dense
from keras.utils import plot_model
from tensorflow.keras import layers
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from tensorflow.keras.applications.vgg16 import VGG16
from keras.layers import MaxPool2D
from keras.layers import Activation, Reshape, Dense
from keras.layers import AveragePooling2D
from tensorflow.keras.applications import ResNet50
from keras.layers import ReLU
import shutil
import pandas
In [ ]:
path = '/content/drive/MyDrive/Dataset_BUSI_with_GT/'

Classification using VGG¶

Loading the data¶

In [ ]:
os.listdir(path)
Out[ ]:
['normal', 'benign', 'malignant']
In [ ]:
info = os.listdir(path)
In [ ]:
images = []
labels = []
masks = []
label_num = -1
image_dict = {}
for label_class in os.listdir(path):
    new_path = os.path.join(path, label_class)
    label_num += 1
    for img in os.listdir(new_path):
        if 'mask' not in img:
            image_id = img.split('.')[0]
            mask_files = [mask for mask in os.listdir(new_path) if f'{image_id}_mask' in mask]
            labels.append(label_num)
            x = cv2.imread(os.path.join(new_path, img), cv2.IMREAD_GRAYSCALE)
            images.append(img_to_array(Image.fromarray(cv2.resize(x, (128, 128)))))
            if mask_files:
                combined_mask = np.zeros((128, 128), dtype=np.uint8)
                for mask_file in mask_files:
                    mask = cv2.imread(os.path.join(new_path, mask_file), cv2.IMREAD_GRAYSCALE)
                    mask = cv2.resize(mask, (128, 128))
                    combined_mask = np.maximum(combined_mask, mask)
                masks.append(img_to_array(Image.fromarray(combined_mask).convert('L')))

print("Images and masks processed.")
Images and masks processed.
In [ ]:
images = np.array(images)
labels = np.array(labels)
masks = np.array(masks)
In [ ]:
plt.figure(figsize = (20,8))
for i in range(5) :
    x = np.random.randint(0,780)
    plt.subplot(2,5,i+1)
    plt.imshow(images[x], 'gray')
    plt.title(info[labels[x]])
    plt.axis('off')
    plt.subplot(2,5,i+6)
    plt.imshow(masks[x], 'gray')
    plt.title('Mask Image')
    plt.axis('off')
plt.show()
In [ ]:
images/= 255.0
In [ ]:
labels = to_categorical(labels)
In [ ]:
print(images.shape)
print(labels.shape)
print(masks.shape)
In [ ]:
print(images.max())
print(images.min())
1.0
0.0
In [ ]:
seaborn.histplot(data = pandas.DataFrame({'id' : [info[p] for p in np.argmax(labels, axis = 1)]}), x = 'id')
plt.title('Distribution of classes accross the entire dataset', fontsize = 15)
Out[ ]:
Text(0.5, 1.0, 'Distribution of classes accross the entire dataset')

Train test split¶

In [ ]:
def greytocolor(img):
  colored_img = img.repeat(3,axis = -1)
  return colored_img
In [ ]:
colored_images = []
for i in range(len(images)):
    colored_img = greytocolor(images[i])
    colored_images.append(colored_img)
colored_images = np.array(colored_images)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-33-65e044c95231> in <cell line: 2>()
      1 colored_images = []
----> 2 for i in range(len(images)):
      3     colored_img = greytocolor(images[i])
      4     colored_images.append(colored_img)
      5 colored_images = np.array(colored_images)

NameError: name 'images' is not defined
In [ ]:
img_train, img_test, labels_train, labels_test = train_test_split(colored_images, labels, test_size = 0.1, shuffle = True, random_state = 1)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-34-0b4202c319a2> in <cell line: 1>()
----> 1 img_train, img_test, labels_train, labels_test = train_test_split(colored_images, labels, test_size = 0.1, shuffle = True, random_state = 1)

NameError: name 'labels' is not defined
In [ ]:
print(img_train.shape)
print(labels_train.shape)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-35-4e16c8666e6d> in <cell line: 1>()
----> 1 print(img_train.shape)
      2 print(labels_train.shape)

NameError: name 'img_train' is not defined
In [ ]:
print(img_test.shape)
print(labels_test.shape)
(78, 128, 128, 3)
(78, 3)

Building Model¶

In [ ]:
vgg_model = VGG16(input_shape = (128, 128, 3),
include_top = False,
weights = 'imagenet')
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
58889256/58889256 [==============================] - 1s 0us/step
In [ ]:
for layer in vgg_model.layers:
    layer.trainable = False
In [ ]:
def VGG():

    model = Sequential()
    model.add(vgg_model)

    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(512,activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256,activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128,activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64,activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(3,activation='softmax'))

    model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])

    return model
In [ ]:
vgg16_model = VGG()
vgg16_model.summary()
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 vgg16 (Functional)          (None, 4, 4, 512)         14714688  
                                                                 
 flatten (Flatten)           (None, 8192)              0         
                                                                 
 batch_normalization (Batch  (None, 8192)              32768     
 Normalization)                                                  
                                                                 
 dense (Dense)               (None, 512)               4194816   
                                                                 
 dropout (Dropout)           (None, 512)               0         
                                                                 
 dense_1 (Dense)             (None, 256)               131328    
                                                                 
 dropout_1 (Dropout)         (None, 256)               0         
                                                                 
 dense_2 (Dense)             (None, 128)               32896     
                                                                 
 dropout_2 (Dropout)         (None, 128)               0         
                                                                 
 dense_3 (Dense)             (None, 64)                8256      
                                                                 
 dropout_3 (Dropout)         (None, 64)                0         
                                                                 
 dense_4 (Dense)             (None, 3)                 195       
                                                                 
=================================================================
Total params: 19114947 (72.92 MB)
Trainable params: 4383875 (16.72 MB)
Non-trainable params: 14731072 (56.19 MB)
_________________________________________________________________

Training of model¶

In [ ]:
checkp = ModelCheckpoint('./vgg_classifier_70_v3.h5', monitor = 'val_loss', save_best_only = True, verbose = 1)
In [ ]:
history = vgg16_model.fit(img_train,labels_train,batch_size = 32,
                    validation_data = (img_test, labels_test),
                    epochs=70, callbacks=[checkp])
Epoch 1/70
22/22 [==============================] - ETA: 0s - loss: 1.7501 - acc: 0.4316
Epoch 1: val_loss improved from inf to 0.88725, saving model to ./vgg_classifier_70_v3.h5
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3079: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
  saving_api.save_model(
22/22 [==============================] - 24s 227ms/step - loss: 1.7501 - acc: 0.4316 - val_loss: 0.8872 - val_acc: 0.5897
Epoch 2/70
22/22 [==============================] - ETA: 0s - loss: 1.5814 - acc: 0.4986
Epoch 2: val_loss improved from 0.88725 to 0.86572, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 65ms/step - loss: 1.5814 - acc: 0.4986 - val_loss: 0.8657 - val_acc: 0.6667
Epoch 3/70
21/22 [===========================>..] - ETA: 0s - loss: 1.2942 - acc: 0.5655
Epoch 3: val_loss did not improve from 0.86572
22/22 [==============================] - 1s 48ms/step - loss: 1.2960 - acc: 0.5641 - val_loss: 0.8833 - val_acc: 0.6923
Epoch 4/70
21/22 [===========================>..] - ETA: 0s - loss: 1.1634 - acc: 0.5417
Epoch 4: val_loss did not improve from 0.86572
22/22 [==============================] - 1s 50ms/step - loss: 1.1585 - acc: 0.5427 - val_loss: 0.8785 - val_acc: 0.7051
Epoch 5/70
21/22 [===========================>..] - ETA: 0s - loss: 1.0988 - acc: 0.5714
Epoch 5: val_loss did not improve from 0.86572
22/22 [==============================] - 1s 48ms/step - loss: 1.0872 - acc: 0.5726 - val_loss: 0.8906 - val_acc: 0.7179
Epoch 6/70
21/22 [===========================>..] - ETA: 0s - loss: 0.9141 - acc: 0.6369
Epoch 6: val_loss did not improve from 0.86572
22/22 [==============================] - 1s 50ms/step - loss: 0.9049 - acc: 0.6396 - val_loss: 0.8700 - val_acc: 0.7436
Epoch 7/70
21/22 [===========================>..] - ETA: 0s - loss: 0.8390 - acc: 0.6503
Epoch 7: val_loss improved from 0.86572 to 0.78452, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 62ms/step - loss: 0.8198 - acc: 0.6581 - val_loss: 0.7845 - val_acc: 0.7436
Epoch 8/70
21/22 [===========================>..] - ETA: 0s - loss: 0.8212 - acc: 0.6696
Epoch 8: val_loss improved from 0.78452 to 0.72192, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 60ms/step - loss: 0.8168 - acc: 0.6709 - val_loss: 0.7219 - val_acc: 0.7821
Epoch 9/70
21/22 [===========================>..] - ETA: 0s - loss: 0.7179 - acc: 0.7188
Epoch 9: val_loss improved from 0.72192 to 0.69724, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 4s 190ms/step - loss: 0.7176 - acc: 0.7179 - val_loss: 0.6972 - val_acc: 0.7692
Epoch 10/70
21/22 [===========================>..] - ETA: 0s - loss: 0.6619 - acc: 0.7217
Epoch 10: val_loss improved from 0.69724 to 0.65818, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 2s 75ms/step - loss: 0.6518 - acc: 0.7251 - val_loss: 0.6582 - val_acc: 0.7949
Epoch 11/70
22/22 [==============================] - ETA: 0s - loss: 0.6262 - acc: 0.7664
Epoch 11: val_loss improved from 0.65818 to 0.61619, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 62ms/step - loss: 0.6262 - acc: 0.7664 - val_loss: 0.6162 - val_acc: 0.7692
Epoch 12/70
21/22 [===========================>..] - ETA: 0s - loss: 0.5776 - acc: 0.7693
Epoch 12: val_loss improved from 0.61619 to 0.58670, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 2s 90ms/step - loss: 0.5711 - acc: 0.7707 - val_loss: 0.5867 - val_acc: 0.7692
Epoch 13/70
21/22 [===========================>..] - ETA: 0s - loss: 0.5059 - acc: 0.8065
Epoch 13: val_loss improved from 0.58670 to 0.54519, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 63ms/step - loss: 0.5061 - acc: 0.8034 - val_loss: 0.5452 - val_acc: 0.7692
Epoch 14/70
21/22 [===========================>..] - ETA: 0s - loss: 0.4384 - acc: 0.8274
Epoch 14: val_loss improved from 0.54519 to 0.52038, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 61ms/step - loss: 0.4273 - acc: 0.8319 - val_loss: 0.5204 - val_acc: 0.8462
Epoch 15/70
21/22 [===========================>..] - ETA: 0s - loss: 0.4422 - acc: 0.8363
Epoch 15: val_loss improved from 0.52038 to 0.46140, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 3s 137ms/step - loss: 0.4332 - acc: 0.8405 - val_loss: 0.4614 - val_acc: 0.8462
Epoch 16/70
22/22 [==============================] - ETA: 0s - loss: 0.3767 - acc: 0.8561
Epoch 16: val_loss improved from 0.46140 to 0.45355, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 65ms/step - loss: 0.3767 - acc: 0.8561 - val_loss: 0.4535 - val_acc: 0.8205
Epoch 17/70
21/22 [===========================>..] - ETA: 0s - loss: 0.2935 - acc: 0.8899
Epoch 17: val_loss improved from 0.45355 to 0.44610, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 2s 70ms/step - loss: 0.2891 - acc: 0.8903 - val_loss: 0.4461 - val_acc: 0.8590
Epoch 18/70
21/22 [===========================>..] - ETA: 0s - loss: 0.3460 - acc: 0.8795
Epoch 18: val_loss did not improve from 0.44610
22/22 [==============================] - 1s 53ms/step - loss: 0.3370 - acc: 0.8832 - val_loss: 0.4558 - val_acc: 0.8333
Epoch 19/70
22/22 [==============================] - ETA: 0s - loss: 0.2452 - acc: 0.9145
Epoch 19: val_loss did not improve from 0.44610
22/22 [==============================] - 1s 51ms/step - loss: 0.2452 - acc: 0.9145 - val_loss: 0.5256 - val_acc: 0.8077
Epoch 20/70
21/22 [===========================>..] - ETA: 0s - loss: 0.2341 - acc: 0.9226
Epoch 20: val_loss did not improve from 0.44610
22/22 [==============================] - 1s 51ms/step - loss: 0.2350 - acc: 0.9188 - val_loss: 0.4727 - val_acc: 0.8333
Epoch 21/70
21/22 [===========================>..] - ETA: 0s - loss: 0.2538 - acc: 0.9167
Epoch 21: val_loss did not improve from 0.44610
22/22 [==============================] - 1s 50ms/step - loss: 0.2610 - acc: 0.9145 - val_loss: 0.4732 - val_acc: 0.8718
Epoch 22/70
21/22 [===========================>..] - ETA: 0s - loss: 0.2238 - acc: 0.9226
Epoch 22: val_loss did not improve from 0.44610
22/22 [==============================] - 1s 49ms/step - loss: 0.2200 - acc: 0.9245 - val_loss: 0.4470 - val_acc: 0.8718
Epoch 23/70
21/22 [===========================>..] - ETA: 0s - loss: 0.2177 - acc: 0.9315
Epoch 23: val_loss did not improve from 0.44610
22/22 [==============================] - 1s 49ms/step - loss: 0.2232 - acc: 0.9288 - val_loss: 0.4642 - val_acc: 0.8590
Epoch 24/70
21/22 [===========================>..] - ETA: 0s - loss: 0.2774 - acc: 0.9286
Epoch 24: val_loss improved from 0.44610 to 0.42954, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 62ms/step - loss: 0.2718 - acc: 0.9288 - val_loss: 0.4295 - val_acc: 0.8462
Epoch 25/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1805 - acc: 0.9420
Epoch 25: val_loss did not improve from 0.42954
22/22 [==============================] - 1s 51ms/step - loss: 0.1817 - acc: 0.9387 - val_loss: 0.4315 - val_acc: 0.8718
Epoch 26/70
21/22 [===========================>..] - ETA: 0s - loss: 0.2044 - acc: 0.9479
Epoch 26: val_loss improved from 0.42954 to 0.30430, saving model to ./vgg_classifier_70_v3.h5
22/22 [==============================] - 1s 61ms/step - loss: 0.2039 - acc: 0.9473 - val_loss: 0.3043 - val_acc: 0.8718
Epoch 27/70
22/22 [==============================] - ETA: 0s - loss: 0.1538 - acc: 0.9530
Epoch 27: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.1538 - acc: 0.9530 - val_loss: 0.3089 - val_acc: 0.8718
Epoch 28/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1873 - acc: 0.9539
Epoch 28: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.1837 - acc: 0.9544 - val_loss: 0.3800 - val_acc: 0.8333
Epoch 29/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1722 - acc: 0.9435
Epoch 29: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.1662 - acc: 0.9459 - val_loss: 0.3729 - val_acc: 0.8462
Epoch 30/70
22/22 [==============================] - ETA: 0s - loss: 0.0994 - acc: 0.9701
Epoch 30: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0994 - acc: 0.9701 - val_loss: 0.3644 - val_acc: 0.8462
Epoch 31/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1236 - acc: 0.9658
Epoch 31: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.1210 - acc: 0.9658 - val_loss: 0.3881 - val_acc: 0.8590
Epoch 32/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1072 - acc: 0.9807
Epoch 32: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 51ms/step - loss: 0.1070 - acc: 0.9786 - val_loss: 0.4114 - val_acc: 0.8718
Epoch 33/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1121 - acc: 0.9688
Epoch 33: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 49ms/step - loss: 0.1108 - acc: 0.9687 - val_loss: 0.4341 - val_acc: 0.8718
Epoch 34/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1597 - acc: 0.9658
Epoch 34: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.1578 - acc: 0.9644 - val_loss: 0.4570 - val_acc: 0.8590
Epoch 35/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0860 - acc: 0.9673
Epoch 35: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0873 - acc: 0.9672 - val_loss: 0.5484 - val_acc: 0.8718
Epoch 36/70
22/22 [==============================] - ETA: 0s - loss: 0.1399 - acc: 0.9630
Epoch 36: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.1399 - acc: 0.9630 - val_loss: 0.5578 - val_acc: 0.8846
Epoch 37/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1336 - acc: 0.9717
Epoch 37: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.1294 - acc: 0.9729 - val_loss: 0.5735 - val_acc: 0.8462
Epoch 38/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0967 - acc: 0.9747
Epoch 38: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.0980 - acc: 0.9729 - val_loss: 0.5132 - val_acc: 0.8590
Epoch 39/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1059 - acc: 0.9762
Epoch 39: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.1022 - acc: 0.9772 - val_loss: 0.5468 - val_acc: 0.8846
Epoch 40/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0746 - acc: 0.9777
Epoch 40: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 51ms/step - loss: 0.0964 - acc: 0.9758 - val_loss: 0.6269 - val_acc: 0.9103
Epoch 41/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0607 - acc: 0.9821
Epoch 41: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 55ms/step - loss: 0.0618 - acc: 0.9815 - val_loss: 0.5733 - val_acc: 0.8462
Epoch 42/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0604 - acc: 0.9821
Epoch 42: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 53ms/step - loss: 0.0713 - acc: 0.9815 - val_loss: 0.7066 - val_acc: 0.8846
Epoch 43/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0555 - acc: 0.9732
Epoch 43: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 53ms/step - loss: 0.0532 - acc: 0.9744 - val_loss: 0.6655 - val_acc: 0.8974
Epoch 44/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0641 - acc: 0.9836
Epoch 44: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.0627 - acc: 0.9843 - val_loss: 0.7866 - val_acc: 0.8846
Epoch 45/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0524 - acc: 0.9807
Epoch 45: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0579 - acc: 0.9801 - val_loss: 0.8071 - val_acc: 0.9103
Epoch 46/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1362 - acc: 0.9673
Epoch 46: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.1315 - acc: 0.9687 - val_loss: 0.6095 - val_acc: 0.8846
Epoch 47/70
22/22 [==============================] - ETA: 0s - loss: 0.0621 - acc: 0.9758
Epoch 47: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0621 - acc: 0.9758 - val_loss: 0.5803 - val_acc: 0.8974
Epoch 48/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0592 - acc: 0.9777
Epoch 48: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.0631 - acc: 0.9772 - val_loss: 0.7561 - val_acc: 0.8718
Epoch 49/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0793 - acc: 0.9836
Epoch 49: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.0768 - acc: 0.9843 - val_loss: 0.7966 - val_acc: 0.8718
Epoch 50/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0822 - acc: 0.9747
Epoch 50: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0792 - acc: 0.9758 - val_loss: 0.9310 - val_acc: 0.8974
Epoch 51/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0432 - acc: 0.9836
Epoch 51: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.0508 - acc: 0.9815 - val_loss: 0.9552 - val_acc: 0.9359
Epoch 52/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0894 - acc: 0.9762
Epoch 52: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 51ms/step - loss: 0.0884 - acc: 0.9758 - val_loss: 0.8157 - val_acc: 0.8462
Epoch 53/70
22/22 [==============================] - ETA: 0s - loss: 0.0584 - acc: 0.9801
Epoch 53: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 54ms/step - loss: 0.0584 - acc: 0.9801 - val_loss: 0.8009 - val_acc: 0.8974
Epoch 54/70
22/22 [==============================] - ETA: 0s - loss: 0.0812 - acc: 0.9801
Epoch 54: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 55ms/step - loss: 0.0812 - acc: 0.9801 - val_loss: 0.8985 - val_acc: 0.8846
Epoch 55/70
22/22 [==============================] - ETA: 0s - loss: 0.0529 - acc: 0.9900
Epoch 55: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 53ms/step - loss: 0.0529 - acc: 0.9900 - val_loss: 0.8794 - val_acc: 0.8846
Epoch 56/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0588 - acc: 0.9881
Epoch 56: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 54ms/step - loss: 0.0600 - acc: 0.9872 - val_loss: 0.9116 - val_acc: 0.9103
Epoch 57/70
21/22 [===========================>..] - ETA: 0s - loss: 0.1017 - acc: 0.9836
Epoch 57: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 51ms/step - loss: 0.0995 - acc: 0.9843 - val_loss: 0.8361 - val_acc: 0.8974
Epoch 58/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0512 - acc: 0.9821
Epoch 58: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.0502 - acc: 0.9829 - val_loss: 0.8800 - val_acc: 0.8974
Epoch 59/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0536 - acc: 0.9807
Epoch 59: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 50ms/step - loss: 0.0514 - acc: 0.9815 - val_loss: 0.8436 - val_acc: 0.8974
Epoch 60/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0443 - acc: 0.9926
Epoch 60: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0507 - acc: 0.9915 - val_loss: 0.8737 - val_acc: 0.9103
Epoch 61/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0805 - acc: 0.9807
Epoch 61: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0806 - acc: 0.9801 - val_loss: 0.8673 - val_acc: 0.8974
Epoch 62/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0441 - acc: 0.9807
Epoch 62: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0446 - acc: 0.9801 - val_loss: 0.9017 - val_acc: 0.8974
Epoch 63/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0427 - acc: 0.9926
Epoch 63: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 51ms/step - loss: 0.0450 - acc: 0.9915 - val_loss: 0.8985 - val_acc: 0.9231
Epoch 64/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0432 - acc: 0.9896
Epoch 64: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 52ms/step - loss: 0.0417 - acc: 0.9900 - val_loss: 0.8683 - val_acc: 0.9103
Epoch 65/70
22/22 [==============================] - ETA: 0s - loss: 0.1053 - acc: 0.9786
Epoch 65: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 54ms/step - loss: 0.1053 - acc: 0.9786 - val_loss: 1.2300 - val_acc: 0.8974
Epoch 66/70
22/22 [==============================] - ETA: 0s - loss: 0.0581 - acc: 0.9872
Epoch 66: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 55ms/step - loss: 0.0581 - acc: 0.9872 - val_loss: 1.3112 - val_acc: 0.8846
Epoch 67/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0593 - acc: 0.9792
Epoch 67: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 56ms/step - loss: 0.0579 - acc: 0.9801 - val_loss: 1.1377 - val_acc: 0.8846
Epoch 68/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0397 - acc: 0.9866
Epoch 68: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 51ms/step - loss: 0.0431 - acc: 0.9858 - val_loss: 1.0425 - val_acc: 0.8974
Epoch 69/70
22/22 [==============================] - ETA: 0s - loss: 0.0430 - acc: 0.9829
Epoch 69: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 53ms/step - loss: 0.0430 - acc: 0.9829 - val_loss: 1.1069 - val_acc: 0.8718
Epoch 70/70
21/22 [===========================>..] - ETA: 0s - loss: 0.0318 - acc: 0.9896
Epoch 70: val_loss did not improve from 0.30430
22/22 [==============================] - 1s 51ms/step - loss: 0.0393 - acc: 0.9872 - val_loss: 1.3194 - val_acc: 0.8846
In [ ]:
plt.figure(figsize = (20,5))
plt.subplot(1,2,1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training_loss', 'validation_loss'])
plt.xlabel('Epochs')
plt.ylabel('Losses')
plt.title('Loss val wrt. Epochs', fontsize = 15)
plt.subplot(1,2,2)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['training_acc', 'validation_acc'])
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('Accuracy wrt. Epochs', fontsize = 15)
Out[ ]:
Text(0.5, 1.0, 'Accuracy wrt. Epochs')
In [ ]:
vgg16_model = load_model('/content/drive/MyDrive/breast cancer models/vgg_classifier_70_v3.h5')
In [ ]:
img_test_prediction = vgg16_model.predict(img_test)
3/3 [==============================] - 1s 65ms/step
In [ ]:
img_test_prediction = np.argmax(img_test_prediction, axis = 1)
labels_test = np.argmax(labels_test, axis = 1)
In [ ]:
print('Accuracy : ' + str(accuracy_score(labels_test, img_test_prediction)))
print(classification_report(labels_test, img_test_prediction, target_names = info))
Accuracy : 0.8846153846153846
              precision    recall  f1-score   support

      normal       0.81      0.93      0.87        14
      benign       0.93      0.86      0.89        44
   malignant       0.86      0.90      0.88        20

    accuracy                           0.88        78
   macro avg       0.87      0.90      0.88        78
weighted avg       0.89      0.88      0.89        78

In [ ]:
cm = confusion_matrix(labels_test,img_test_prediction)
In [ ]:
plt.figure(figsize = (5,5))
ax = seaborn.heatmap(cm, cmap=plt.cm.Greens, annot=True, square=True, xticklabels = info, yticklabels = info)
ax.set_ylabel('Actual', fontsize=40)
ax.set_xlabel('Predicted', fontsize=40)
Out[ ]:
Text(0.5, 63.222222222222186, 'Predicted')

Checking confusion matrix for entire dataset

In [ ]:
colored_images.shape
Out[ ]:
(780, 128, 128, 3)
In [ ]:
image_prediction = vgg16_model.predict(colored_images)
25/25 [==============================] - 1s 47ms/step
In [ ]:
image_prediction = np.argmax(image_prediction, axis = 1)
labels = np.argmax(labels, axis = 1)
In [ ]:
image_prediction.shape
Out[ ]:
(780,)
In [ ]:
labels.shape
Out[ ]:
(780,)
In [ ]:
print('Accuracy : ' + str(accuracy_score(labels, image_prediction)))
print(classification_report(labels, image_prediction, target_names = info))
Accuracy : 0.9846153846153847
              precision    recall  f1-score   support

      normal       0.97      0.99      0.98       133
      benign       0.99      0.98      0.99       437
   malignant       0.98      0.99      0.98       210

    accuracy                           0.98       780
   macro avg       0.98      0.99      0.98       780
weighted avg       0.98      0.98      0.98       780

In [ ]:
cm = confusion_matrix(labels,image_prediction)
cm
Out[ ]:
array([[132,   1,   0],
       [  4, 429,   4],
       [  0,   3, 207]])
In [ ]:
plt.figure(figsize = (5,5))
ax = seaborn.heatmap(cm, cmap=plt.cm.Greens, annot=True, square=True, xticklabels = info, yticklabels = info)
ax.set_ylabel('Actual', fontsize=40)
ax.set_xlabel('Predicted', fontsize=40)
Out[ ]:
Text(0.5, 63.222222222222186, 'Predicted')

Creating new dataset from prediction¶

In [ ]:
benign_images = []
malignant_images = []
normal_images = []
benign_masks = []
malignant_masks = []
normal_masks = []
image_prediction = vgg16_model.predict(colored_images)
image_prediction = np.argmax(image_prediction, axis = 1)
for i in range(len(colored_images)):
    img = colored_images[i]
    prediction = image_prediction[i]
    mask = masks[i]

    if prediction == 1:
        benign_images.append(img)
        benign_masks.append(mask)
    elif prediction == 2:
        malignant_images.append(img)
        malignant_masks.append(mask)
    elif prediction == 0:
        normal_images.append(img)
        normal_masks.append(mask)
25/25 [==============================] - 1s 44ms/step
In [ ]:
print(len(benign_images))   #437
print(len(malignant_images)) #210
print(len(normal_images))  #133
433
211
136
In [ ]:
print(len(benign_masks))   #437
print(len(malignant_masks)) #210
print(len(normal_masks))
433
211
136
In [ ]:
new_dataset_path = '/content/drive/MyDrive/Segmentation dataset'
os.makedirs(new_dataset_path, exist_ok=True)
In [ ]:
path_benign_image = '/content/drive/MyDrive/Segmentation dataset/benign/image'
path_malignant_image = '/content/drive/MyDrive/Segmentation dataset/malignant/image'
path_normal_image = '/content/drive/MyDrive/Segmentation dataset/normal/image'
path_benign_mask = '/content/drive/MyDrive/Segmentation dataset/benign/mask'
path_malignant_mask = '/content/drive/MyDrive/Segmentation dataset/malignant/mask'
path_normal_mask = '/content/drive/MyDrive/Segmentation dataset/normal/mask'

os.makedirs(path_benign_image, exist_ok=True)
os.makedirs(path_malignant_image, exist_ok=True)
os.makedirs(path_normal_image, exist_ok=True)
os.makedirs(path_benign_mask, exist_ok=True)
os.makedirs(path_malignant_mask, exist_ok=True)
os.makedirs(path_normal_mask, exist_ok=True)

for i, img in enumerate(benign_images):
    filename = f'benign_{i}.jpg'
    cv2.imwrite(os.path.join(path_benign_image, filename), img*255)
    mask_filename = f'benign_{i}_mask.jpg'
    cv2.imwrite(os.path.join(path_benign_mask, mask_filename), benign_masks[i])

for i, img in enumerate(malignant_images):
    filename = f'malignant_{i}.jpg'
    cv2.imwrite(os.path.join(path_malignant_image, filename), img*255)
    mask_filename = f'malignant_{i}_mask.jpg'
    cv2.imwrite(os.path.join(path_malignant_mask, mask_filename), malignant_masks[i])


for i, img in enumerate(normal_images):
    filename = f'normal_{i}.jpg'
    cv2.imwrite(os.path.join(path_normal_image, filename), img*255)
    mask_filename = f'normal_{i}_mask.jpg'
    cv2.imwrite(os.path.join(path_normal_mask, mask_filename), normal_masks[i])
In [ ]:
benign_image = [f for f in os.listdir(path_benign_image) if os.path.isfile(os.path.join(path_benign_image, f))]
print("Number of bening images in segmentation dataset:",len(benign_image))
benign_mask = [f for f in os.listdir(path_benign_mask) if os.path.isfile(os.path.join(path_benign_mask, f))]
print("Number of bening masks in segmentation dataset:",len(benign_mask))

malignant_image = [f for f in os.listdir(path_malignant_image) if os.path.isfile(os.path.join(path_malignant_image, f))]
print("Number of malignant images in segmentation dataset:",len(malignant_image))
malignant_mask = [f for f in os.listdir(path_malignant_mask) if os.path.isfile(os.path.join(path_malignant_mask, f))]
print("Number of malignant masks in segmentation dataset:",len(malignant_mask))

normal_image = [f for f in os.listdir(path_normal_image) if os.path.isfile(os.path.join(path_normal_image, f))]
print("Number of normal images in segmentation dataset:",len(normal_image))
normal_mask = [f for f in os.listdir(path_normal_mask) if os.path.isfile(os.path.join(path_normal_mask, f))]
print("Number of normal masks in segmentation dataset:",len(normal_mask))
Number of bening images in segmentation dataset: 433
Number of bening masks in segmentation dataset: 433
Number of malignant images in segmentation dataset: 211
Number of malignant masks in segmentation dataset: 211
Number of normal images in segmentation dataset: 136
Number of normal masks in segmentation dataset: 136

Image Segmentation¶

Loading dataset¶

In [ ]:
img_b, mask_b = np.zeros((433, 128, 128, 1)), np.zeros((433, 128, 128, 1))
img_m, mask_m = np.zeros((211, 128, 128, 1)), np.zeros((211, 128, 128, 1))
In [ ]:
path_benign_image = '/content/drive/MyDrive/Segmentation dataset/benign/image'
path_malignant_image = '/content/drive/MyDrive/Segmentation dataset/malignant/image'
path_benign_mask = '/content/drive/MyDrive/Segmentation dataset/benign/mask'
path_malignant_mask = '/content/drive/MyDrive/Segmentation dataset/malignant/mask'
In [ ]:
for i in range(len(os.listdir(path_benign_image))):
  filename = os.listdir(path_benign_image)[i].split(".")
  img = cv2.imread(os.path.join(path_benign_image,os.listdir(path_benign_image)[i]),cv2.IMREAD_GRAYSCALE)
  pil_img = Image.fromarray (img)
  img_b[i] = img_to_array(pil_img)
  mask = cv2.imread(os.path.join(path_benign_mask,(filename[0]+"_mask."+filename[1])),cv2.IMREAD_GRAYSCALE)
  pil_mask = Image.fromarray (mask)
  mask_b[i] = img_to_array(pil_mask)

for i in range(len(os.listdir(path_malignant_image))):
  filename = os.listdir(path_malignant_image)[i].split(".")
  img = cv2.imread(os.path.join(path_malignant_image,os.listdir(path_malignant_image)[i]),cv2.IMREAD_GRAYSCALE)
  pil_img = Image.fromarray (img)
  img_m[i] = img_to_array(pil_img)
  mask = cv2.imread(os.path.join(path_malignant_mask,(filename[0]+"_mask."+filename[1])),cv2.IMREAD_GRAYSCALE)
  pil_mask = Image.fromarray (mask)
  mask_m[i] = img_to_array(pil_mask)

Creating datasets for model training and validation

In [ ]:
scan = np.concatenate((img_b, img_m), axis = 0)
mask = np.concatenate((mask_b, mask_m), axis = 0)
In [ ]:
plt.figure(figsize = (20,8))
for i in range(5) :
    plt.subplot(2,5,i+1)
    plt.imshow(scan[i+1], 'gray')
    plt.title('Real Image')
    plt.axis('off')

for i in range(5) :
    plt.subplot(2,5,i+6)
    plt.imshow(mask[i+1], 'gray')
    plt.title('Mask Image')
    plt.axis('off')
plt.show()

Data Preprocessing¶

Normalization

In [ ]:
scan /= 255.0
mask /= 255.0
In [ ]:
print(scan.shape)
print(mask.shape)
(644, 128, 128, 1)
(644, 128, 128, 1)

Thresholding

In [ ]:
print(scan.max())
print(scan.min())
1.0
0.0
In [ ]:
print(mask.max())
print(mask.min())
1.0
0.0
In [ ]:
scan[scan > 1.0] = 1.0
mask[mask > 1.0] = 1.0
In [ ]:
print(scan.max())
print(scan.min())
print(mask.max())
print(mask.min())
1.0
0.0
1.0
0.0

Visualization

In [ ]:
plt.figure(figsize = (20,8))
for i in range(5) :
    x = np.random.randint(0,644)
    plt.subplot(2,5,i+1)
    plt.imshow(scan[x], 'gray')
    plt.title('Real Image')
    plt.axis('off')
    plt.subplot(2,5,i+6)
    plt.imshow(mask[x], 'gray')
    plt.title('Mask Image')
    plt.axis('off')
plt.show()

Train test split¶

In [ ]:
scan_train, scan_test, mask_train, mask_test = train_test_split(scan, mask, test_size = 0.1, random_state = 1)
In [ ]:
print(scan_train.shape)
print(mask_train.shape)
(579, 128, 128, 1)
(579, 128, 128, 1)
In [ ]:
print(scan_test.shape)
print(mask_test.shape)
(65, 128, 128, 1)
(65, 128, 128, 1)

Evaluation Metrics¶

In [ ]:
def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true_f = tf.reshape(y_true, [-1])
    y_pred_f = tf.reshape(y_pred, [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
    return score
In [ ]:
def dice_loss(y_true, y_pred):
    loss = 1 - dice_coeff(y_true, y_pred)
    return loss
In [ ]:
def iou(y_true, y_pred):
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)

    intersection = K.sum(y_true * y_pred)
    union = K.sum(y_true) + K.sum(y_pred) - intersection

    iou = intersection / (union + K.epsilon())

    return iou

U-net¶

Building the model¶

Contracting path

In [ ]:
inply = Input((128, 128, 1,))

conv1 = Conv2D(2**6, (3,3), activation = 'relu', padding = 'same')(inply)
conv1 = Conv2D(2**6, (3,3), activation = 'relu', padding = 'same')(conv1)
pool1 = MaxPooling2D((2,2), strides = 2, padding = 'same')(conv1)
drop1 = Dropout(0.2)(pool1)

conv2 = Conv2D(2**7, (3,3), activation = 'relu', padding = 'same')(drop1)
conv2 = Conv2D(2**7, (3,3), activation = 'relu', padding = 'same')(conv2)
pool2 = MaxPooling2D((2,2), strides = 2, padding = 'same')(conv2)
drop2 = Dropout(0.2)(pool2)

conv3 = Conv2D(2**8, (3,3), activation = 'relu', padding = 'same')(drop2)
conv3 = Conv2D(2**8, (3,3), activation = 'relu', padding = 'same')(conv3)
pool3 = MaxPooling2D((2,2), strides = 2, padding = 'same')(conv3)
drop3 = Dropout(0.2)(pool3)

conv4 = Conv2D(2**9, (3,3), activation = 'relu', padding = 'same')(drop3)
conv4 = Conv2D(2**9, (3,3), activation = 'relu', padding = 'same')(conv4)
pool4 = MaxPooling2D((2,2), strides = 2, padding = 'same')(conv4)
drop4 = Dropout(0.2)(pool4)

Bottleneck layer

In [ ]:
convm = Conv2D(2**10, (3,3), activation = 'relu', padding = 'same')(drop4)
convm = Conv2D(2**10, (3,3), activation = 'relu', padding = 'same')(convm)

Expanding layer

In [ ]:
tran5 = Conv2DTranspose(2**9, (2,2), strides = 2, padding = 'valid', activation = 'relu')(convm)
conc5 = Concatenate()([tran5, conv4])
conv5 = Conv2D(2**9, (3,3), activation = 'relu', padding = 'same')(conc5)
conv5 = Conv2D(2**9, (3,3), activation = 'relu', padding = 'same')(conv5)
drop5 = Dropout(0.1)(conv5)

tran6 = Conv2DTranspose(2**8, (2,2), strides = 2, padding = 'valid', activation = 'relu')(drop5)
conc6 = Concatenate()([tran6, conv3])
conv6 = Conv2D(2**8, (3,3), activation = 'relu', padding = 'same')(conc6)
conv6 = Conv2D(2**8, (3,3), activation = 'relu', padding = 'same')(conv6)
drop6 = Dropout(0.1)(conv6)

tran7 = Conv2DTranspose(2**7, (2,2), strides = 2, padding = 'valid', activation = 'relu')(drop6)
conc7 = Concatenate()([tran7, conv2])
conv7 = Conv2D(2**7, (3,3), activation = 'relu', padding = 'same')(conc7)
conv7 = Conv2D(2**7, (3,3), activation = 'relu', padding = 'same')(conv7)
drop7 = Dropout(0.1)(conv7)

tran8 = Conv2DTranspose(2**6, (2,2), strides = 2, padding = 'valid', activation = 'relu')(drop7)
conc8 = Concatenate()([tran8, conv1])
conv8 = Conv2D(2**6, (3,3), activation = 'relu', padding = 'same')(conc8)
conv8 = Conv2D(2**6, (3,3), activation = 'relu', padding = 'same')(conv8)
drop8 = Dropout(0.1)(conv8)
In [ ]:
outly = Conv2D(2**0, (1,1), activation = 'relu', padding = 'same')(drop8)
unet_model = Model(inputs = inply, outputs = outly, name = 'U-net')
In [ ]:
keras.utils.plot_model(unet_model, './model_plot.png', show_shapes = True)
Out[ ]:

Training¶

In [ ]:
unet_model.compile(loss = BinaryCrossentropy(), optimizer = keras.optimizers.Adam(learning_rate = 0.00005),metrics = ['accuracy',dice_loss,iou])
print(unet_model.summary())
Model: "U-net"
__________________________________________________________________________________________________
 Layer (type)                Output Shape                 Param #   Connected to                  
==================================================================================================
 input_2 (InputLayer)        [(None, 128, 128, 1)]        0         []                            
                                                                                                  
 conv2d_19 (Conv2D)          (None, 128, 128, 64)         640       ['input_2[0][0]']             
                                                                                                  
 conv2d_20 (Conv2D)          (None, 128, 128, 64)         36928     ['conv2d_19[0][0]']           
                                                                                                  
 max_pooling2d_4 (MaxPoolin  (None, 64, 64, 64)           0         ['conv2d_20[0][0]']           
 g2D)                                                                                             
                                                                                                  
 dropout_8 (Dropout)         (None, 64, 64, 64)           0         ['max_pooling2d_4[0][0]']     
                                                                                                  
 conv2d_21 (Conv2D)          (None, 64, 64, 128)          73856     ['dropout_8[0][0]']           
                                                                                                  
 conv2d_22 (Conv2D)          (None, 64, 64, 128)          147584    ['conv2d_21[0][0]']           
                                                                                                  
 max_pooling2d_5 (MaxPoolin  (None, 32, 32, 128)          0         ['conv2d_22[0][0]']           
 g2D)                                                                                             
                                                                                                  
 dropout_9 (Dropout)         (None, 32, 32, 128)          0         ['max_pooling2d_5[0][0]']     
                                                                                                  
 conv2d_23 (Conv2D)          (None, 32, 32, 256)          295168    ['dropout_9[0][0]']           
                                                                                                  
 conv2d_24 (Conv2D)          (None, 32, 32, 256)          590080    ['conv2d_23[0][0]']           
                                                                                                  
 max_pooling2d_6 (MaxPoolin  (None, 16, 16, 256)          0         ['conv2d_24[0][0]']           
 g2D)                                                                                             
                                                                                                  
 dropout_10 (Dropout)        (None, 16, 16, 256)          0         ['max_pooling2d_6[0][0]']     
                                                                                                  
 conv2d_25 (Conv2D)          (None, 16, 16, 512)          1180160   ['dropout_10[0][0]']          
                                                                                                  
 conv2d_26 (Conv2D)          (None, 16, 16, 512)          2359808   ['conv2d_25[0][0]']           
                                                                                                  
 max_pooling2d_7 (MaxPoolin  (None, 8, 8, 512)            0         ['conv2d_26[0][0]']           
 g2D)                                                                                             
                                                                                                  
 dropout_11 (Dropout)        (None, 8, 8, 512)            0         ['max_pooling2d_7[0][0]']     
                                                                                                  
 conv2d_27 (Conv2D)          (None, 8, 8, 1024)           4719616   ['dropout_11[0][0]']          
                                                                                                  
 conv2d_28 (Conv2D)          (None, 8, 8, 1024)           9438208   ['conv2d_27[0][0]']           
                                                                                                  
 conv2d_transpose_4 (Conv2D  (None, 16, 16, 512)          2097664   ['conv2d_28[0][0]']           
 Transpose)                                                                                       
                                                                                                  
 concatenate_4 (Concatenate  (None, 16, 16, 1024)         0         ['conv2d_transpose_4[0][0]',  
 )                                                                   'conv2d_26[0][0]']           
                                                                                                  
 conv2d_29 (Conv2D)          (None, 16, 16, 512)          4719104   ['concatenate_4[0][0]']       
                                                                                                  
 conv2d_30 (Conv2D)          (None, 16, 16, 512)          2359808   ['conv2d_29[0][0]']           
                                                                                                  
 dropout_12 (Dropout)        (None, 16, 16, 512)          0         ['conv2d_30[0][0]']           
                                                                                                  
 conv2d_transpose_5 (Conv2D  (None, 32, 32, 256)          524544    ['dropout_12[0][0]']          
 Transpose)                                                                                       
                                                                                                  
 concatenate_5 (Concatenate  (None, 32, 32, 512)          0         ['conv2d_transpose_5[0][0]',  
 )                                                                   'conv2d_24[0][0]']           
                                                                                                  
 conv2d_31 (Conv2D)          (None, 32, 32, 256)          1179904   ['concatenate_5[0][0]']       
                                                                                                  
 conv2d_32 (Conv2D)          (None, 32, 32, 256)          590080    ['conv2d_31[0][0]']           
                                                                                                  
 dropout_13 (Dropout)        (None, 32, 32, 256)          0         ['conv2d_32[0][0]']           
                                                                                                  
 conv2d_transpose_6 (Conv2D  (None, 64, 64, 128)          131200    ['dropout_13[0][0]']          
 Transpose)                                                                                       
                                                                                                  
 concatenate_6 (Concatenate  (None, 64, 64, 256)          0         ['conv2d_transpose_6[0][0]',  
 )                                                                   'conv2d_22[0][0]']           
                                                                                                  
 conv2d_33 (Conv2D)          (None, 64, 64, 128)          295040    ['concatenate_6[0][0]']       
                                                                                                  
 conv2d_34 (Conv2D)          (None, 64, 64, 128)          147584    ['conv2d_33[0][0]']           
                                                                                                  
 dropout_14 (Dropout)        (None, 64, 64, 128)          0         ['conv2d_34[0][0]']           
                                                                                                  
 conv2d_transpose_7 (Conv2D  (None, 128, 128, 64)         32832     ['dropout_14[0][0]']          
 Transpose)                                                                                       
                                                                                                  
 concatenate_7 (Concatenate  (None, 128, 128, 128)        0         ['conv2d_transpose_7[0][0]',  
 )                                                                   'conv2d_20[0][0]']           
                                                                                                  
 conv2d_35 (Conv2D)          (None, 128, 128, 64)         73792     ['concatenate_7[0][0]']       
                                                                                                  
 conv2d_36 (Conv2D)          (None, 128, 128, 64)         36928     ['conv2d_35[0][0]']           
                                                                                                  
 dropout_15 (Dropout)        (None, 128, 128, 64)         0         ['conv2d_36[0][0]']           
                                                                                                  
 conv2d_37 (Conv2D)          (None, 128, 128, 1)          65        ['dropout_15[0][0]']          
                                                                                                  
==================================================================================================
Total params: 31030593 (118.37 MB)
Trainable params: 31030593 (118.37 MB)
Non-trainable params: 0 (0.00 Byte)
__________________________________________________________________________________________________
None
In [ ]:
checkp = ModelCheckpoint('./unet_70_v3.h5',save_best_only = True, verbose = 1)
In [ ]:
history = unet_model.fit(scan_train, mask_train, epochs = 70, batch_size = 8, validation_data = (scan_test, mask_test), callbacks = [checkp])
Epoch 1/70
73/73 [==============================] - ETA: 0s - loss: 0.3289 - accuracy: 0.8873 - dice_loss: 0.9004 - iou: 0.0529
Epoch 1: val_loss improved from inf to 0.25395, saving model to ./unet_70_v3.h5
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3079: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
  saving_api.save_model(
73/73 [==============================] - 44s 274ms/step - loss: 0.3289 - accuracy: 0.8873 - dice_loss: 0.9004 - iou: 0.0529 - val_loss: 0.2539 - val_accuracy: 0.9005 - val_dice_loss: 0.8601 - val_iou: 0.0753
Epoch 2/70
73/73 [==============================] - ETA: 0s - loss: 0.2522 - accuracy: 0.8901 - dice_loss: 0.7925 - iou: 0.1172
Epoch 2: val_loss improved from 0.25395 to 0.21536, saving model to ./unet_70_v3.h5
73/73 [==============================] - 13s 171ms/step - loss: 0.2522 - accuracy: 0.8901 - dice_loss: 0.7925 - iou: 0.1172 - val_loss: 0.2154 - val_accuracy: 0.9007 - val_dice_loss: 0.7947 - val_iou: 0.1151
Epoch 3/70
73/73 [==============================] - ETA: 0s - loss: 0.2217 - accuracy: 0.8983 - dice_loss: 0.6940 - iou: 0.1829
Epoch 3: val_loss improved from 0.21536 to 0.21336, saving model to ./unet_70_v3.h5
73/73 [==============================] - 17s 235ms/step - loss: 0.2217 - accuracy: 0.8983 - dice_loss: 0.6940 - iou: 0.1829 - val_loss: 0.2134 - val_accuracy: 0.9031 - val_dice_loss: 0.7459 - val_iou: 0.1471
Epoch 4/70
73/73 [==============================] - ETA: 0s - loss: 0.2136 - accuracy: 0.9026 - dice_loss: 0.6554 - iou: 0.2118
Epoch 4: val_loss improved from 0.21336 to 0.18372, saving model to ./unet_70_v3.h5
73/73 [==============================] - 13s 175ms/step - loss: 0.2136 - accuracy: 0.9026 - dice_loss: 0.6554 - iou: 0.2118 - val_loss: 0.1837 - val_accuracy: 0.9108 - val_dice_loss: 0.6952 - val_iou: 0.1812
Epoch 5/70
73/73 [==============================] - ETA: 0s - loss: 0.1995 - accuracy: 0.9052 - dice_loss: 0.6256 - iou: 0.2359
Epoch 5: val_loss did not improve from 0.18372
73/73 [==============================] - 12s 165ms/step - loss: 0.1995 - accuracy: 0.9052 - dice_loss: 0.6256 - iou: 0.2359 - val_loss: 0.1869 - val_accuracy: 0.9067 - val_dice_loss: 0.6920 - val_iou: 0.1845
Epoch 6/70
73/73 [==============================] - ETA: 0s - loss: 0.2039 - accuracy: 0.9039 - dice_loss: 0.6359 - iou: 0.2275
Epoch 6: val_loss improved from 0.18372 to 0.17591, saving model to ./unet_70_v3.h5
73/73 [==============================] - 13s 174ms/step - loss: 0.2039 - accuracy: 0.9039 - dice_loss: 0.6359 - iou: 0.2275 - val_loss: 0.1759 - val_accuracy: 0.9101 - val_dice_loss: 0.6550 - val_iou: 0.2122
Epoch 7/70
73/73 [==============================] - ETA: 0s - loss: 0.1801 - accuracy: 0.9125 - dice_loss: 0.5613 - iou: 0.2866
Epoch 7: val_loss improved from 0.17591 to 0.15603, saving model to ./unet_70_v3.h5
73/73 [==============================] - 13s 176ms/step - loss: 0.1801 - accuracy: 0.9125 - dice_loss: 0.5613 - iou: 0.2866 - val_loss: 0.1560 - val_accuracy: 0.9226 - val_dice_loss: 0.5630 - val_iou: 0.2856
Epoch 8/70
73/73 [==============================] - ETA: 0s - loss: 0.2635 - accuracy: 0.8944 - dice_loss: 0.7818 - iou: 0.1348
Epoch 8: val_loss did not improve from 0.15603
73/73 [==============================] - 12s 164ms/step - loss: 0.2635 - accuracy: 0.8944 - dice_loss: 0.7818 - iou: 0.1348 - val_loss: 0.2116 - val_accuracy: 0.9026 - val_dice_loss: 0.7610 - val_iou: 0.1378
Epoch 9/70
73/73 [==============================] - ETA: 0s - loss: 0.1901 - accuracy: 0.9108 - dice_loss: 0.5909 - iou: 0.2641
Epoch 9: val_loss did not improve from 0.15603
73/73 [==============================] - 12s 162ms/step - loss: 0.1901 - accuracy: 0.9108 - dice_loss: 0.5909 - iou: 0.2641 - val_loss: 0.1938 - val_accuracy: 0.9090 - val_dice_loss: 0.6621 - val_iou: 0.2079
Epoch 10/70
73/73 [==============================] - ETA: 0s - loss: 0.1852 - accuracy: 0.9113 - dice_loss: 0.5728 - iou: 0.2803
Epoch 10: val_loss improved from 0.15603 to 0.15045, saving model to ./unet_70_v3.h5
73/73 [==============================] - 13s 177ms/step - loss: 0.1852 - accuracy: 0.9113 - dice_loss: 0.5728 - iou: 0.2803 - val_loss: 0.1504 - val_accuracy: 0.9264 - val_dice_loss: 0.5367 - val_iou: 0.3099
Epoch 11/70
73/73 [==============================] - ETA: 0s - loss: 0.2019 - accuracy: 0.9059 - dice_loss: 0.6306 - iou: 0.2351
Epoch 11: val_loss did not improve from 0.15045
73/73 [==============================] - 12s 160ms/step - loss: 0.2019 - accuracy: 0.9059 - dice_loss: 0.6306 - iou: 0.2351 - val_loss: 0.1677 - val_accuracy: 0.9192 - val_dice_loss: 0.6195 - val_iou: 0.2409
Epoch 12/70
73/73 [==============================] - ETA: 0s - loss: 0.1719 - accuracy: 0.9178 - dice_loss: 0.5073 - iou: 0.3367
Epoch 12: val_loss improved from 0.15045 to 0.14515, saving model to ./unet_70_v3.h5
73/73 [==============================] - 16s 217ms/step - loss: 0.1719 - accuracy: 0.9178 - dice_loss: 0.5073 - iou: 0.3367 - val_loss: 0.1451 - val_accuracy: 0.9278 - val_dice_loss: 0.5182 - val_iou: 0.3275
Epoch 13/70
73/73 [==============================] - ETA: 0s - loss: 0.1654 - accuracy: 0.9200 - dice_loss: 0.4893 - iou: 0.3529
Epoch 13: val_loss did not improve from 0.14515
73/73 [==============================] - 11s 157ms/step - loss: 0.1654 - accuracy: 0.9200 - dice_loss: 0.4893 - iou: 0.3529 - val_loss: 0.1490 - val_accuracy: 0.9218 - val_dice_loss: 0.5396 - val_iou: 0.3111
Epoch 14/70
73/73 [==============================] - ETA: 0s - loss: 0.2507 - accuracy: 0.8936 - dice_loss: 0.7840 - iou: 0.1276
Epoch 14: val_loss did not improve from 0.14515
73/73 [==============================] - 12s 158ms/step - loss: 0.2507 - accuracy: 0.8936 - dice_loss: 0.7840 - iou: 0.1276 - val_loss: 0.1858 - val_accuracy: 0.9085 - val_dice_loss: 0.6686 - val_iou: 0.2039
Epoch 15/70
73/73 [==============================] - ETA: 0s - loss: 0.1915 - accuracy: 0.9081 - dice_loss: 0.6014 - iou: 0.2532
Epoch 15: val_loss did not improve from 0.14515
73/73 [==============================] - 12s 162ms/step - loss: 0.1915 - accuracy: 0.9081 - dice_loss: 0.6014 - iou: 0.2532 - val_loss: 0.1714 - val_accuracy: 0.9128 - val_dice_loss: 0.6167 - val_iou: 0.2473
Epoch 16/70
73/73 [==============================] - ETA: 0s - loss: 0.1871 - accuracy: 0.9089 - dice_loss: 0.5881 - iou: 0.2673
Epoch 16: val_loss did not improve from 0.14515
73/73 [==============================] - 12s 161ms/step - loss: 0.1871 - accuracy: 0.9089 - dice_loss: 0.5881 - iou: 0.2673 - val_loss: 0.1661 - val_accuracy: 0.9149 - val_dice_loss: 0.6160 - val_iou: 0.2435
Epoch 17/70
73/73 [==============================] - ETA: 0s - loss: 0.1698 - accuracy: 0.9165 - dice_loss: 0.5334 - iou: 0.3100
Epoch 17: val_loss did not improve from 0.14515
73/73 [==============================] - 11s 156ms/step - loss: 0.1698 - accuracy: 0.9165 - dice_loss: 0.5334 - iou: 0.3100 - val_loss: 0.1482 - val_accuracy: 0.9289 - val_dice_loss: 0.5030 - val_iou: 0.3465
Epoch 18/70
73/73 [==============================] - ETA: 0s - loss: 0.2445 - accuracy: 0.8995 - dice_loss: 0.7075 - iou: 0.1915
Epoch 18: val_loss did not improve from 0.14515
73/73 [==============================] - 11s 156ms/step - loss: 0.2445 - accuracy: 0.8995 - dice_loss: 0.7075 - iou: 0.1915 - val_loss: 0.2350 - val_accuracy: 0.9005 - val_dice_loss: 0.8504 - val_iou: 0.0810
Epoch 19/70
73/73 [==============================] - ETA: 0s - loss: 0.2145 - accuracy: 0.8995 - dice_loss: 0.6874 - iou: 0.1892
Epoch 19: val_loss did not improve from 0.14515
73/73 [==============================] - 12s 160ms/step - loss: 0.2145 - accuracy: 0.8995 - dice_loss: 0.6874 - iou: 0.1892 - val_loss: 0.1643 - val_accuracy: 0.9243 - val_dice_loss: 0.5884 - val_iou: 0.2665
Epoch 20/70
73/73 [==============================] - ETA: 0s - loss: 0.1828 - accuracy: 0.9059 - dice_loss: 0.6137 - iou: 0.2446
Epoch 20: val_loss did not improve from 0.14515
73/73 [==============================] - 12s 160ms/step - loss: 0.1828 - accuracy: 0.9059 - dice_loss: 0.6137 - iou: 0.2446 - val_loss: 0.1508 - val_accuracy: 0.9140 - val_dice_loss: 0.5832 - val_iou: 0.2725
Epoch 21/70
73/73 [==============================] - ETA: 0s - loss: 0.1533 - accuracy: 0.9216 - dice_loss: 0.4768 - iou: 0.3619
Epoch 21: val_loss improved from 0.14515 to 0.13394, saving model to ./unet_70_v3.h5
73/73 [==============================] - 17s 238ms/step - loss: 0.1533 - accuracy: 0.9216 - dice_loss: 0.4768 - iou: 0.3619 - val_loss: 0.1339 - val_accuracy: 0.9338 - val_dice_loss: 0.4741 - val_iou: 0.3721
Epoch 22/70
73/73 [==============================] - ETA: 0s - loss: 0.1433 - accuracy: 0.9254 - dice_loss: 0.4226 - iou: 0.4142
Epoch 22: val_loss improved from 0.13394 to 0.13225, saving model to ./unet_70_v3.h5
73/73 [==============================] - 13s 183ms/step - loss: 0.1433 - accuracy: 0.9254 - dice_loss: 0.4226 - iou: 0.4142 - val_loss: 0.1322 - val_accuracy: 0.9277 - val_dice_loss: 0.4728 - val_iou: 0.3693
Epoch 23/70
73/73 [==============================] - ETA: 0s - loss: 0.1446 - accuracy: 0.9221 - dice_loss: 0.4583 - iou: 0.3870
Epoch 23: val_loss improved from 0.13225 to 0.12527, saving model to ./unet_70_v3.h5
73/73 [==============================] - 13s 176ms/step - loss: 0.1446 - accuracy: 0.9221 - dice_loss: 0.4583 - iou: 0.3870 - val_loss: 0.1253 - val_accuracy: 0.9342 - val_dice_loss: 0.4041 - val_iou: 0.4437
Epoch 24/70
73/73 [==============================] - ETA: 0s - loss: 0.1313 - accuracy: 0.9294 - dice_loss: 0.3857 - iou: 0.4540
Epoch 24: val_loss did not improve from 0.12527
73/73 [==============================] - 12s 158ms/step - loss: 0.1313 - accuracy: 0.9294 - dice_loss: 0.3857 - iou: 0.4540 - val_loss: 0.1290 - val_accuracy: 0.9337 - val_dice_loss: 0.3801 - val_iou: 0.4725
Epoch 25/70
73/73 [==============================] - ETA: 0s - loss: 0.1304 - accuracy: 0.9290 - dice_loss: 0.3859 - iou: 0.4549
Epoch 25: val_loss did not improve from 0.12527
73/73 [==============================] - 12s 161ms/step - loss: 0.1304 - accuracy: 0.9290 - dice_loss: 0.3859 - iou: 0.4549 - val_loss: 0.1341 - val_accuracy: 0.9286 - val_dice_loss: 0.5141 - val_iou: 0.3281
Epoch 26/70
73/73 [==============================] - ETA: 0s - loss: 0.1487 - accuracy: 0.9214 - dice_loss: 0.4805 - iou: 0.3627
Epoch 26: val_loss improved from 0.12527 to 0.12335, saving model to ./unet_70_v3.h5
73/73 [==============================] - 15s 204ms/step - loss: 0.1487 - accuracy: 0.9214 - dice_loss: 0.4805 - iou: 0.3627 - val_loss: 0.1234 - val_accuracy: 0.9338 - val_dice_loss: 0.4176 - val_iou: 0.4292
Epoch 27/70
73/73 [==============================] - ETA: 0s - loss: 0.1290 - accuracy: 0.9298 - dice_loss: 0.3868 - iou: 0.4508
Epoch 27: val_loss did not improve from 0.12335
73/73 [==============================] - 12s 160ms/step - loss: 0.1290 - accuracy: 0.9298 - dice_loss: 0.3868 - iou: 0.4508 - val_loss: 0.1237 - val_accuracy: 0.9345 - val_dice_loss: 0.4328 - val_iou: 0.4117
Epoch 28/70
73/73 [==============================] - ETA: 0s - loss: 0.1175 - accuracy: 0.9338 - dice_loss: 0.3389 - iou: 0.5053
Epoch 28: val_loss improved from 0.12335 to 0.11410, saving model to ./unet_70_v3.h5
73/73 [==============================] - 17s 239ms/step - loss: 0.1175 - accuracy: 0.9338 - dice_loss: 0.3389 - iou: 0.5053 - val_loss: 0.1141 - val_accuracy: 0.9391 - val_dice_loss: 0.3683 - val_iou: 0.4853
Epoch 29/70
73/73 [==============================] - ETA: 0s - loss: 0.1471 - accuracy: 0.9209 - dice_loss: 0.4502 - iou: 0.4011
Epoch 29: val_loss did not improve from 0.11410
73/73 [==============================] - 12s 161ms/step - loss: 0.1471 - accuracy: 0.9209 - dice_loss: 0.4502 - iou: 0.4011 - val_loss: 0.1455 - val_accuracy: 0.9145 - val_dice_loss: 0.5400 - val_iou: 0.3149
Epoch 30/70
73/73 [==============================] - ETA: 0s - loss: 0.1440 - accuracy: 0.9235 - dice_loss: 0.4490 - iou: 0.3920
Epoch 30: val_loss did not improve from 0.11410
73/73 [==============================] - 12s 163ms/step - loss: 0.1440 - accuracy: 0.9235 - dice_loss: 0.4490 - iou: 0.3920 - val_loss: 0.1150 - val_accuracy: 0.9371 - val_dice_loss: 0.3648 - val_iou: 0.4817
Epoch 31/70
73/73 [==============================] - ETA: 0s - loss: 0.1125 - accuracy: 0.9357 - dice_loss: 0.2940 - iou: 0.5567
Epoch 31: val_loss did not improve from 0.11410
73/73 [==============================] - 12s 161ms/step - loss: 0.1125 - accuracy: 0.9357 - dice_loss: 0.2940 - iou: 0.5567 - val_loss: 0.1221 - val_accuracy: 0.9348 - val_dice_loss: 0.4055 - val_iou: 0.4400
Epoch 32/70
73/73 [==============================] - ETA: 0s - loss: 0.1079 - accuracy: 0.9365 - dice_loss: 0.3035 - iou: 0.5497
Epoch 32: val_loss improved from 0.11410 to 0.10894, saving model to ./unet_70_v3.h5
73/73 [==============================] - 17s 238ms/step - loss: 0.1079 - accuracy: 0.9365 - dice_loss: 0.3035 - iou: 0.5497 - val_loss: 0.1089 - val_accuracy: 0.9403 - val_dice_loss: 0.3501 - val_iou: 0.5010
Epoch 33/70
73/73 [==============================] - ETA: 0s - loss: 0.1054 - accuracy: 0.9393 - dice_loss: 0.2841 - iou: 0.5733
Epoch 33: val_loss did not improve from 0.10894
73/73 [==============================] - 12s 160ms/step - loss: 0.1054 - accuracy: 0.9393 - dice_loss: 0.2841 - iou: 0.5733 - val_loss: 0.1183 - val_accuracy: 0.9366 - val_dice_loss: 0.3332 - val_iou: 0.5258
Epoch 34/70
73/73 [==============================] - ETA: 0s - loss: 0.0988 - accuracy: 0.9403 - dice_loss: 0.2686 - iou: 0.5935
Epoch 34: val_loss improved from 0.10894 to 0.10620, saving model to ./unet_70_v3.h5
73/73 [==============================] - 13s 181ms/step - loss: 0.0988 - accuracy: 0.9403 - dice_loss: 0.2686 - iou: 0.5935 - val_loss: 0.1062 - val_accuracy: 0.9399 - val_dice_loss: 0.2900 - val_iou: 0.5799
Epoch 35/70
73/73 [==============================] - ETA: 0s - loss: 0.0956 - accuracy: 0.9411 - dice_loss: 0.2508 - iou: 0.6118
Epoch 35: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 157ms/step - loss: 0.0956 - accuracy: 0.9411 - dice_loss: 0.2508 - iou: 0.6118 - val_loss: 0.1084 - val_accuracy: 0.9398 - val_dice_loss: 0.3302 - val_iou: 0.5288
Epoch 36/70
73/73 [==============================] - ETA: 0s - loss: 0.0939 - accuracy: 0.9414 - dice_loss: 0.2365 - iou: 0.6372
Epoch 36: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 161ms/step - loss: 0.0939 - accuracy: 0.9414 - dice_loss: 0.2365 - iou: 0.6372 - val_loss: 0.1069 - val_accuracy: 0.9412 - val_dice_loss: 0.3202 - val_iou: 0.5485
Epoch 37/70
73/73 [==============================] - ETA: 0s - loss: 0.1032 - accuracy: 0.9391 - dice_loss: 0.2736 - iou: 0.5848
Epoch 37: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 157ms/step - loss: 0.1032 - accuracy: 0.9391 - dice_loss: 0.2736 - iou: 0.5848 - val_loss: 0.1145 - val_accuracy: 0.9377 - val_dice_loss: 0.3845 - val_iou: 0.4650
Epoch 38/70
73/73 [==============================] - ETA: 0s - loss: 0.0937 - accuracy: 0.9419 - dice_loss: 0.2439 - iou: 0.6202
Epoch 38: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 156ms/step - loss: 0.0937 - accuracy: 0.9419 - dice_loss: 0.2439 - iou: 0.6202 - val_loss: 0.1136 - val_accuracy: 0.9383 - val_dice_loss: 0.3180 - val_iou: 0.5521
Epoch 39/70
73/73 [==============================] - ETA: 0s - loss: 0.0879 - accuracy: 0.9436 - dice_loss: 0.2205 - iou: 0.6550
Epoch 39: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 156ms/step - loss: 0.0879 - accuracy: 0.9436 - dice_loss: 0.2205 - iou: 0.6550 - val_loss: 0.1232 - val_accuracy: 0.9359 - val_dice_loss: 0.3197 - val_iou: 0.5472
Epoch 40/70
73/73 [==============================] - ETA: 0s - loss: 0.1273 - accuracy: 0.9276 - dice_loss: 0.3712 - iou: 0.4861
Epoch 40: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 156ms/step - loss: 0.1273 - accuracy: 0.9276 - dice_loss: 0.3712 - iou: 0.4861 - val_loss: 0.1216 - val_accuracy: 0.9329 - val_dice_loss: 0.3636 - val_iou: 0.4905
Epoch 41/70
73/73 [==============================] - ETA: 0s - loss: 0.0914 - accuracy: 0.9429 - dice_loss: 0.2226 - iou: 0.6598
Epoch 41: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.0914 - accuracy: 0.9429 - dice_loss: 0.2226 - iou: 0.6598 - val_loss: 0.1135 - val_accuracy: 0.9397 - val_dice_loss: 0.3302 - val_iou: 0.5253
Epoch 42/70
73/73 [==============================] - ETA: 0s - loss: 0.0797 - accuracy: 0.9472 - dice_loss: 0.1691 - iou: 0.7263
Epoch 42: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.0797 - accuracy: 0.9472 - dice_loss: 0.1691 - iou: 0.7263 - val_loss: 0.1287 - val_accuracy: 0.9394 - val_dice_loss: 0.2526 - val_iou: 0.6333
Epoch 43/70
73/73 [==============================] - ETA: 0s - loss: 0.0750 - accuracy: 0.9485 - dice_loss: 0.1431 - iou: 0.7688
Epoch 43: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.0750 - accuracy: 0.9485 - dice_loss: 0.1431 - iou: 0.7688 - val_loss: 0.1143 - val_accuracy: 0.9404 - val_dice_loss: 0.2124 - val_iou: 0.6908
Epoch 44/70
73/73 [==============================] - ETA: 0s - loss: 0.0853 - accuracy: 0.9449 - dice_loss: 0.2169 - iou: 0.6675
Epoch 44: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 157ms/step - loss: 0.0853 - accuracy: 0.9449 - dice_loss: 0.2169 - iou: 0.6675 - val_loss: 0.1087 - val_accuracy: 0.9403 - val_dice_loss: 0.2933 - val_iou: 0.5726
Epoch 45/70
73/73 [==============================] - ETA: 0s - loss: 0.0812 - accuracy: 0.9466 - dice_loss: 0.1812 - iou: 0.7112
Epoch 45: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.0812 - accuracy: 0.9466 - dice_loss: 0.1812 - iou: 0.7112 - val_loss: 0.1184 - val_accuracy: 0.9378 - val_dice_loss: 0.3395 - val_iou: 0.5231
Epoch 46/70
73/73 [==============================] - ETA: 0s - loss: 0.0805 - accuracy: 0.9460 - dice_loss: 0.1849 - iou: 0.7043
Epoch 46: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 156ms/step - loss: 0.0805 - accuracy: 0.9460 - dice_loss: 0.1849 - iou: 0.7043 - val_loss: 0.1394 - val_accuracy: 0.9349 - val_dice_loss: 0.2431 - val_iou: 0.6512
Epoch 47/70
73/73 [==============================] - ETA: 0s - loss: 0.1184 - accuracy: 0.9391 - dice_loss: 0.2714 - iou: 0.6290
Epoch 47: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.1184 - accuracy: 0.9391 - dice_loss: 0.2714 - iou: 0.6290 - val_loss: 0.2560 - val_accuracy: 0.9014 - val_dice_loss: 0.8219 - val_iou: 0.1016
Epoch 48/70
73/73 [==============================] - ETA: 0s - loss: 0.2304 - accuracy: 0.8953 - dice_loss: 0.7237 - iou: 0.1628
Epoch 48: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.2304 - accuracy: 0.8953 - dice_loss: 0.7237 - iou: 0.1628 - val_loss: 0.1961 - val_accuracy: 0.9053 - val_dice_loss: 0.6969 - val_iou: 0.1858
Epoch 49/70
73/73 [==============================] - ETA: 0s - loss: 0.2075 - accuracy: 0.9022 - dice_loss: 0.6545 - iou: 0.2138
Epoch 49: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 156ms/step - loss: 0.2075 - accuracy: 0.9022 - dice_loss: 0.6545 - iou: 0.2138 - val_loss: 0.1481 - val_accuracy: 0.9246 - val_dice_loss: 0.5206 - val_iou: 0.3308
Epoch 50/70
73/73 [==============================] - ETA: 0s - loss: 0.1726 - accuracy: 0.9153 - dice_loss: 0.5174 - iou: 0.3260
Epoch 50: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.1726 - accuracy: 0.9153 - dice_loss: 0.5174 - iou: 0.3260 - val_loss: 0.1403 - val_accuracy: 0.9272 - val_dice_loss: 0.4813 - val_iou: 0.3673
Epoch 51/70
73/73 [==============================] - ETA: 0s - loss: 0.1552 - accuracy: 0.9192 - dice_loss: 0.4724 - iou: 0.3670
Epoch 51: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 155ms/step - loss: 0.1552 - accuracy: 0.9192 - dice_loss: 0.4724 - iou: 0.3670 - val_loss: 0.1436 - val_accuracy: 0.9266 - val_dice_loss: 0.4705 - val_iou: 0.3809
Epoch 52/70
73/73 [==============================] - ETA: 0s - loss: 0.1396 - accuracy: 0.9272 - dice_loss: 0.3782 - iou: 0.4634
Epoch 52: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 157ms/step - loss: 0.1396 - accuracy: 0.9272 - dice_loss: 0.3782 - iou: 0.4634 - val_loss: 0.1257 - val_accuracy: 0.9330 - val_dice_loss: 0.3920 - val_iou: 0.4599
Epoch 53/70
73/73 [==============================] - ETA: 0s - loss: 0.1332 - accuracy: 0.9290 - dice_loss: 0.3796 - iou: 0.4606
Epoch 53: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.1332 - accuracy: 0.9290 - dice_loss: 0.3796 - iou: 0.4606 - val_loss: 0.1241 - val_accuracy: 0.9323 - val_dice_loss: 0.3869 - val_iou: 0.4666
Epoch 54/70
73/73 [==============================] - ETA: 0s - loss: 0.1198 - accuracy: 0.9324 - dice_loss: 0.3326 - iou: 0.5213
Epoch 54: val_loss did not improve from 0.10620
73/73 [==============================] - 12s 160ms/step - loss: 0.1198 - accuracy: 0.9324 - dice_loss: 0.3326 - iou: 0.5213 - val_loss: 0.1100 - val_accuracy: 0.9374 - val_dice_loss: 0.3266 - val_iou: 0.5389
Epoch 55/70
73/73 [==============================] - ETA: 0s - loss: 0.1122 - accuracy: 0.9354 - dice_loss: 0.2880 - iou: 0.5705
Epoch 55: val_loss did not improve from 0.10620
73/73 [==============================] - 11s 156ms/step - loss: 0.1122 - accuracy: 0.9354 - dice_loss: 0.2880 - iou: 0.5705 - val_loss: 0.1197 - val_accuracy: 0.9339 - val_dice_loss: 0.3794 - val_iou: 0.4746
Epoch 56/70
73/73 [==============================] - ETA: 0s - loss: 0.1000 - accuracy: 0.9394 - dice_loss: 0.2430 - iou: 0.6354
Epoch 56: val_loss improved from 0.10620 to 0.10161, saving model to ./unet_70_v3.h5
73/73 [==============================] - 12s 172ms/step - loss: 0.1000 - accuracy: 0.9394 - dice_loss: 0.2430 - iou: 0.6354 - val_loss: 0.1016 - val_accuracy: 0.9414 - val_dice_loss: 0.2789 - val_iou: 0.5960
Epoch 57/70
73/73 [==============================] - ETA: 0s - loss: 0.0987 - accuracy: 0.9392 - dice_loss: 0.2392 - iou: 0.6437
Epoch 57: val_loss did not improve from 0.10161
73/73 [==============================] - 12s 160ms/step - loss: 0.0987 - accuracy: 0.9392 - dice_loss: 0.2392 - iou: 0.6437 - val_loss: 0.1048 - val_accuracy: 0.9406 - val_dice_loss: 0.2778 - val_iou: 0.6022
Epoch 58/70
73/73 [==============================] - ETA: 0s - loss: 0.0932 - accuracy: 0.9420 - dice_loss: 0.2274 - iou: 0.6532
Epoch 58: val_loss did not improve from 0.10161
73/73 [==============================] - 12s 160ms/step - loss: 0.0932 - accuracy: 0.9420 - dice_loss: 0.2274 - iou: 0.6532 - val_loss: 0.1198 - val_accuracy: 0.9326 - val_dice_loss: 0.2794 - val_iou: 0.6098
Epoch 59/70
73/73 [==============================] - ETA: 0s - loss: 0.0805 - accuracy: 0.9470 - dice_loss: 0.1296 - iou: 0.7920
Epoch 59: val_loss did not improve from 0.10161
73/73 [==============================] - 11s 156ms/step - loss: 0.0805 - accuracy: 0.9470 - dice_loss: 0.1296 - iou: 0.7920 - val_loss: 0.1121 - val_accuracy: 0.9389 - val_dice_loss: 0.2362 - val_iou: 0.6583
Epoch 60/70
73/73 [==============================] - ETA: 0s - loss: 0.0748 - accuracy: 0.9487 - dice_loss: 0.1160 - iou: 0.8090
Epoch 60: val_loss did not improve from 0.10161
73/73 [==============================] - 11s 156ms/step - loss: 0.0748 - accuracy: 0.9487 - dice_loss: 0.1160 - iou: 0.8090 - val_loss: 0.1115 - val_accuracy: 0.9389 - val_dice_loss: 0.2347 - val_iou: 0.6625
Epoch 61/70
73/73 [==============================] - ETA: 0s - loss: 0.0675 - accuracy: 0.9514 - dice_loss: 0.0829 - iou: 0.8700
Epoch 61: val_loss did not improve from 0.10161
73/73 [==============================] - 11s 156ms/step - loss: 0.0675 - accuracy: 0.9514 - dice_loss: 0.0829 - iou: 0.8700 - val_loss: 0.1194 - val_accuracy: 0.9377 - val_dice_loss: 0.2425 - val_iou: 0.6597
Epoch 62/70
73/73 [==============================] - ETA: 0s - loss: 0.0679 - accuracy: 0.9508 - dice_loss: 0.0997 - iou: 0.8461
Epoch 62: val_loss did not improve from 0.10161
73/73 [==============================] - 11s 156ms/step - loss: 0.0679 - accuracy: 0.9508 - dice_loss: 0.0997 - iou: 0.8461 - val_loss: 0.1119 - val_accuracy: 0.9387 - val_dice_loss: 0.2257 - val_iou: 0.6809
Epoch 63/70
73/73 [==============================] - ETA: 0s - loss: 0.0735 - accuracy: 0.9488 - dice_loss: 0.1167 - iou: 0.8195
Epoch 63: val_loss did not improve from 0.10161
73/73 [==============================] - 11s 155ms/step - loss: 0.0735 - accuracy: 0.9488 - dice_loss: 0.1167 - iou: 0.8195 - val_loss: 0.1193 - val_accuracy: 0.9372 - val_dice_loss: 0.2858 - val_iou: 0.5937
Epoch 64/70
73/73 [==============================] - ETA: 0s - loss: 0.0674 - accuracy: 0.9510 - dice_loss: 0.0809 - iou: 0.8741
Epoch 64: val_loss did not improve from 0.10161
73/73 [==============================] - 11s 156ms/step - loss: 0.0674 - accuracy: 0.9510 - dice_loss: 0.0809 - iou: 0.8741 - val_loss: 0.1689 - val_accuracy: 0.9384 - val_dice_loss: 0.1902 - val_iou: 0.7352
Epoch 65/70
73/73 [==============================] - ETA: 0s - loss: 0.0620 - accuracy: 0.9524 - dice_loss: 0.0760 - iou: 0.8838
Epoch 65: val_loss did not improve from 0.10161
73/73 [==============================] - 12s 160ms/step - loss: 0.0620 - accuracy: 0.9524 - dice_loss: 0.0760 - iou: 0.8838 - val_loss: 0.1433 - val_accuracy: 0.9335 - val_dice_loss: 0.2983 - val_iou: 0.5797
Epoch 66/70
73/73 [==============================] - ETA: 0s - loss: 0.0671 - accuracy: 0.9510 - dice_loss: 0.1069 - iou: 0.8357
Epoch 66: val_loss did not improve from 0.10161
73/73 [==============================] - 12s 159ms/step - loss: 0.0671 - accuracy: 0.9510 - dice_loss: 0.1069 - iou: 0.8357 - val_loss: 0.1192 - val_accuracy: 0.9423 - val_dice_loss: 0.1506 - val_iou: 0.7952
Epoch 67/70
73/73 [==============================] - ETA: 0s - loss: 0.0575 - accuracy: 0.9541 - dice_loss: 0.0384 - iou: 0.9546
Epoch 67: val_loss did not improve from 0.10161
73/73 [==============================] - 12s 159ms/step - loss: 0.0575 - accuracy: 0.9541 - dice_loss: 0.0384 - iou: 0.9546 - val_loss: 0.1186 - val_accuracy: 0.9408 - val_dice_loss: 0.1660 - val_iou: 0.7707
Epoch 68/70
73/73 [==============================] - ETA: 0s - loss: 0.0547 - accuracy: 0.9550 - dice_loss: 0.0282 - iou: 0.9667
Epoch 68: val_loss did not improve from 0.10161
73/73 [==============================] - 11s 156ms/step - loss: 0.0547 - accuracy: 0.9550 - dice_loss: 0.0282 - iou: 0.9667 - val_loss: 0.1491 - val_accuracy: 0.9359 - val_dice_loss: 0.1778 - val_iou: 0.7572
Epoch 69/70
73/73 [==============================] - ETA: 0s - loss: 0.0534 - accuracy: 0.9555 - dice_loss: 0.0024 - iou: 1.0185
Epoch 69: val_loss did not improve from 0.10161
73/73 [==============================] - 11s 155ms/step - loss: 0.0534 - accuracy: 0.9555 - dice_loss: 0.0024 - iou: 1.0185 - val_loss: 0.1701 - val_accuracy: 0.9392 - val_dice_loss: 0.2355 - val_iou: 0.6659
Epoch 70/70
73/73 [==============================] - ETA: 0s - loss: 0.0899 - accuracy: 0.9443 - dice_loss: 0.1846 - iou: 0.7129
Epoch 70: val_loss did not improve from 0.10161
73/73 [==============================] - 12s 160ms/step - loss: 0.0899 - accuracy: 0.9443 - dice_loss: 0.1846 - iou: 0.7129 - val_loss: 0.1317 - val_accuracy: 0.9377 - val_dice_loss: 0.2158 - val_iou: 0.7004

Model Performance¶

In [ ]:
plt.figure(figsize=(20,5))

plt.subplot(1,4,1)
plt.title("Model Loss")
plt.plot(history.history['loss'], label="Training")
plt.plot(history.history['val_loss'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,2)
plt.title("Model Accuracy")
plt.plot(history.history['accuracy'], label="Training")
plt.plot(history.history['val_accuracy'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,3)
plt.title("Model IoU")
plt.plot(history.history['iou'], label="Training")
plt.plot(history.history['val_iou'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,4)
plt.title("Model Dice Loss")
plt.plot(history.history['dice_loss'], label="Training")
plt.plot(history.history['val_dice_loss'], label="Validation")
plt.legend()
plt.grid()

plt.show()

Predictions¶

In [ ]:
unet_model = load_model('./unet_70_v3.h5',custom_objects={'dice_loss': dice_loss,"iou":iou})
In [ ]:
mask_pred = unet_model.predict(scan_test)
3/3 [==============================] - 5s 57ms/step
In [ ]:
print(mask_pred.shape)
(65, 128, 128, 1)
In [ ]:
plt.figure(figsize = (10,60))

i = 0
x = 0
while i < 45 :

    plt.subplot(15,3,i+1)
    plt.imshow(scan_test[x], 'gray')
    plt.title('Real medic Image')
    plt.axis('off')

    plt.subplot(15,3,i+2)
    plt.imshow(mask_test[x], 'gray')
    plt.title('Ground Truth Img')
    plt.axis('off')

    plt.subplot(15,3,i+3)
    plt.imshow(mask_pred[x], 'gray')
    plt.title('Predicited Image')
    plt.axis('off')

    x += 1
    i += 3
plt.show()

Attention UNet¶

Building the model¶

ENCODER

In [ ]:
class EncoderBlock(Layer):

    def __init__(self, filters, rate, pooling=True, **kwargs):
        super(EncoderBlock, self).__init__(**kwargs)

        self.filters = filters
        self.rate = rate
        self.pooling = pooling

        self.c1 = Conv2D(filters, kernel_size=3, strides=1, padding='same', activation='relu', kernel_initializer='he_normal')
        self.drop = Dropout(rate)
        self.c2 = Conv2D(filters, kernel_size=3, strides=1, padding='same', activation='relu', kernel_initializer='he_normal')
        self.pool = MaxPool2D()

    def call(self, X):
        x = self.c1(X)
        x = self.drop(x)
        x = self.c2(x)
        if self.pooling:
            y = self.pool(x)
            return y, x
        else:
            return x

    def get_config(self):
        base_config = super().get_config()
        return {
            **base_config,
            "filters":self.filters,
            'rate':self.rate,
            'pooling':self.pooling
        }

DECODER

In [ ]:
class DecoderBlock(Layer):

    def __init__(self, filters, rate, **kwargs):
        super(DecoderBlock, self).__init__(**kwargs)

        self.filters = filters
        self.rate = rate

        self.up = UpSampling2D()
        self.net = EncoderBlock(filters, rate, pooling=False)

    def call(self, X):
        X, skip_X = X
        x = self.up(X)
        c_ = concatenate([x, skip_X])
        x = self.net(c_)
        return x

    def get_config(self):
        base_config = super().get_config()
        return {
            **base_config,
            "filters":self.filters,
            'rate':self.rate,
        }

ATTENTION GATE

In [ ]:
class AttentionGate(Layer):

    def __init__(self, filters, bn, **kwargs):
        super(AttentionGate, self).__init__(**kwargs)

        self.filters = filters
        self.bn = bn

        self.normal = Conv2D(filters, kernel_size=3, padding='same', activation='relu', kernel_initializer='he_normal')
        self.down = Conv2D(filters, kernel_size=3, strides=2, padding='same', activation='relu', kernel_initializer='he_normal')
        self.learn = Conv2D(1, kernel_size=1, padding='same', activation='sigmoid')
        self.resample = UpSampling2D()
        self.BN = BatchNormalization()

    def call(self, X):
        X, skip_X = X

        x = self.normal(X)
        skip = self.down(skip_X)
        x = Add()([x, skip])
        x = self.learn(x)
        x = self.resample(x)
        f = Multiply()([x, skip_X])
        if self.bn:
            return self.BN(f)
        else:
            return f
        # return f

    def get_config(self):
        base_config = super().get_config()
        return {
            **base_config,
            "filters":self.filters,
            "bn":self.bn
        }

CALLBACKS

In [ ]:
def show_image(image, title=None, cmap=None, alpha=1):
    plt.imshow(image, cmap=cmap, alpha=alpha)
    if title is not None:
        plt.title(title)
    plt.axis('off')

def show_mask(image, mask, cmap=None, alpha=0.4):
    plt.imshow(image)
    plt.imshow(tf.squeeze(mask), cmap=cmap, alpha=alpha)
    plt.axis('off')
In [ ]:
class ShowProgress(Callback):
    def on_epoch_end(self, epochs, logs=None):
        id = np.random.randint(200)
        exp = GradCAM()
        image = scan[id]
        m = mask[id]
        pred_mask = self.model.predict(image[np.newaxis,...])
        cam = exp.explain(
            validation_data=(image[np.newaxis,...], m),
            class_index=1,
            layer_name='Attention4',
            model=self.model
        )

        plt.figure(figsize=(10,5))

        plt.subplot(1,3,1)
        plt.title("Original Mask")
        show_mask(image, m, cmap='copper')

        plt.subplot(1,3,2)
        plt.title("Predicted Mask")
        show_mask(image, pred_mask, cmap='copper')

        plt.subplot(1,3,3)
        show_image(cam,title="GradCAM")

        plt.tight_layout()
        plt.show()
In [ ]:
# Inputs
input_layer = Input(shape=scan.shape[-3:])

# Encoder
p1, c1 = EncoderBlock(32,0.1, name="Encoder1")(input_layer)
p2, c2 = EncoderBlock(64,0.1, name="Encoder2")(p1)
p3, c3 = EncoderBlock(128,0.2, name="Encoder3")(p2)
p4, c4 = EncoderBlock(256,0.2, name="Encoder4")(p3)

# Encoding
encoding = EncoderBlock(512,0.3, pooling=False, name="Encoding")(p4)

# Attention + Decoder

a1 = AttentionGate(256, bn=True, name="Attention1")([encoding, c4])
d1 = DecoderBlock(256,0.2, name="Decoder1")([encoding, a1])

a2 = AttentionGate(128, bn=True, name="Attention2")([d1, c3])
d2 = DecoderBlock(128,0.2, name="Decoder2")([d1, a2])

a3 = AttentionGate(64, bn=True, name="Attention3")([d2, c2])
d3 = DecoderBlock(64,0.1, name="Decoder3")([d2, a3])


a4 = AttentionGate(32, bn=True, name="Attention4")([d3, c1])
d4 = DecoderBlock(32,0.1, name="Decoder4")([d3, a4])

# Output
output_layer = Conv2D(1, kernel_size=1, activation='sigmoid', padding='same')(d4)
In [ ]:
att_model = Model(
    inputs=[input_layer],
    outputs=[output_layer]
)
In [ ]:
keras.utils.plot_model(att_model, './attunet_plot.png', show_shapes = True)
Out[ ]:

Training¶

In [ ]:
att_model.compile(
    loss='binary_crossentropy',
    optimizer = keras.optimizers.Adam(learning_rate = 0.00005),
    metrics=['accuracy', iou,dice_loss]
)
In [ ]:
# Callbacks
cb = [
    # EarlyStopping(patience=3, restore_best_weight=True),
    ModelCheckpoint("AttentionUNet_30_v4.h5", save_best_only=True),
]
In [ ]:
BATCH_SIZE = 8
SPE = len(scan_train)//BATCH_SIZE
history = att_model.fit(scan_train, mask_train, epochs = 50, batch_size = BATCH_SIZE,
                    steps_per_epoch=SPE, validation_data = (scan_test, mask_test), callbacks = cb)
Epoch 1/50
72/72 [==============================] - 31s 105ms/step - loss: 0.2869 - accuracy: 0.8853 - iou: 0.1112 - dice_loss: 0.8037 - val_loss: 0.3740 - val_accuracy: 0.9065 - val_iou: 0.1030 - val_dice_loss: 0.8151
Epoch 2/50
72/72 [==============================] - 6s 87ms/step - loss: 0.2251 - accuracy: 0.8998 - iou: 0.1939 - dice_loss: 0.6780 - val_loss: 0.2458 - val_accuracy: 0.9091 - val_iou: 0.1277 - val_dice_loss: 0.7752
Epoch 3/50
72/72 [==============================] - 6s 85ms/step - loss: 0.1922 - accuracy: 0.9105 - iou: 0.2585 - dice_loss: 0.5942 - val_loss: 0.2129 - val_accuracy: 0.9114 - val_iou: 0.1509 - val_dice_loss: 0.7393
Epoch 4/50
72/72 [==============================] - 6s 90ms/step - loss: 0.1862 - accuracy: 0.9129 - iou: 0.2856 - dice_loss: 0.5627 - val_loss: 0.1979 - val_accuracy: 0.9084 - val_iou: 0.1557 - val_dice_loss: 0.7318
Epoch 5/50
72/72 [==============================] - 6s 89ms/step - loss: 0.1818 - accuracy: 0.9143 - iou: 0.3092 - dice_loss: 0.5337 - val_loss: 0.1892 - val_accuracy: 0.9193 - val_iou: 0.1891 - val_dice_loss: 0.6846
Epoch 6/50
72/72 [==============================] - 7s 92ms/step - loss: 0.1726 - accuracy: 0.9170 - iou: 0.3104 - dice_loss: 0.5325 - val_loss: 0.1665 - val_accuracy: 0.9270 - val_iou: 0.2466 - val_dice_loss: 0.6091
Epoch 7/50
72/72 [==============================] - 6s 81ms/step - loss: 0.1670 - accuracy: 0.9199 - iou: 0.3434 - dice_loss: 0.4950 - val_loss: 0.1789 - val_accuracy: 0.9211 - val_iou: 0.2515 - val_dice_loss: 0.6035
Epoch 8/50
72/72 [==============================] - 6s 90ms/step - loss: 0.1536 - accuracy: 0.9232 - iou: 0.3666 - dice_loss: 0.4703 - val_loss: 0.1561 - val_accuracy: 0.9259 - val_iou: 0.3203 - val_dice_loss: 0.5258
Epoch 9/50
72/72 [==============================] - 6s 88ms/step - loss: 0.1668 - accuracy: 0.9204 - iou: 0.3471 - dice_loss: 0.4911 - val_loss: 0.1429 - val_accuracy: 0.9293 - val_iou: 0.3388 - val_dice_loss: 0.5058
Epoch 10/50
72/72 [==============================] - 6s 79ms/step - loss: 0.1453 - accuracy: 0.9256 - iou: 0.3927 - dice_loss: 0.4435 - val_loss: 0.1755 - val_accuracy: 0.9256 - val_iou: 0.3393 - val_dice_loss: 0.5081
Epoch 11/50
72/72 [==============================] - 6s 81ms/step - loss: 0.1462 - accuracy: 0.9267 - iou: 0.3788 - dice_loss: 0.4578 - val_loss: 0.1478 - val_accuracy: 0.9297 - val_iou: 0.3531 - val_dice_loss: 0.4893
Epoch 12/50
72/72 [==============================] - 6s 82ms/step - loss: 0.1380 - accuracy: 0.9282 - iou: 0.3995 - dice_loss: 0.4358 - val_loss: 0.1583 - val_accuracy: 0.9269 - val_iou: 0.3505 - val_dice_loss: 0.4900
Epoch 13/50
72/72 [==============================] - 9s 131ms/step - loss: 0.1387 - accuracy: 0.9269 - iou: 0.4150 - dice_loss: 0.4199 - val_loss: 0.1412 - val_accuracy: 0.9308 - val_iou: 0.3956 - val_dice_loss: 0.4466
Epoch 14/50
72/72 [==============================] - 6s 82ms/step - loss: 0.1351 - accuracy: 0.9289 - iou: 0.4272 - dice_loss: 0.4093 - val_loss: 0.1495 - val_accuracy: 0.9315 - val_iou: 0.4459 - val_dice_loss: 0.3975
Epoch 15/50
72/72 [==============================] - 6s 82ms/step - loss: 0.1225 - accuracy: 0.9340 - iou: 0.4513 - dice_loss: 0.3848 - val_loss: 0.1445 - val_accuracy: 0.9299 - val_iou: 0.4076 - val_dice_loss: 0.4364
Epoch 16/50
72/72 [==============================] - 6s 82ms/step - loss: 0.1360 - accuracy: 0.9279 - iou: 0.4319 - dice_loss: 0.4041 - val_loss: 0.1947 - val_accuracy: 0.9239 - val_iou: 0.3370 - val_dice_loss: 0.5140
Epoch 17/50
72/72 [==============================] - 6s 82ms/step - loss: 0.1192 - accuracy: 0.9349 - iou: 0.4442 - dice_loss: 0.3922 - val_loss: 0.1414 - val_accuracy: 0.9346 - val_iou: 0.4489 - val_dice_loss: 0.3934
Epoch 18/50
72/72 [==============================] - 6s 84ms/step - loss: 0.1188 - accuracy: 0.9335 - iou: 0.4696 - dice_loss: 0.3669 - val_loss: 0.1489 - val_accuracy: 0.9325 - val_iou: 0.4527 - val_dice_loss: 0.3919
Epoch 19/50
72/72 [==============================] - 6s 80ms/step - loss: 0.1221 - accuracy: 0.9326 - iou: 0.4722 - dice_loss: 0.3642 - val_loss: 0.1447 - val_accuracy: 0.9306 - val_iou: 0.4210 - val_dice_loss: 0.4218
Epoch 20/50
72/72 [==============================] - 6s 81ms/step - loss: 0.1059 - accuracy: 0.9383 - iou: 0.5150 - dice_loss: 0.3253 - val_loss: 0.1592 - val_accuracy: 0.9203 - val_iou: 0.4279 - val_dice_loss: 0.4187
Epoch 21/50
72/72 [==============================] - 6s 82ms/step - loss: 0.0976 - accuracy: 0.9413 - iou: 0.5309 - dice_loss: 0.3119 - val_loss: 0.1864 - val_accuracy: 0.9023 - val_iou: 0.4032 - val_dice_loss: 0.4414
Epoch 22/50
72/72 [==============================] - 6s 83ms/step - loss: 0.1050 - accuracy: 0.9384 - iou: 0.5148 - dice_loss: 0.3247 - val_loss: 0.1657 - val_accuracy: 0.9215 - val_iou: 0.3887 - val_dice_loss: 0.4575
Epoch 23/50
72/72 [==============================] - 6s 80ms/step - loss: 0.0984 - accuracy: 0.9415 - iou: 0.5206 - dice_loss: 0.3215 - val_loss: 0.1681 - val_accuracy: 0.9148 - val_iou: 0.4057 - val_dice_loss: 0.4381
Epoch 24/50
72/72 [==============================] - 6s 82ms/step - loss: 0.0970 - accuracy: 0.9422 - iou: 0.5459 - dice_loss: 0.2981 - val_loss: 0.1669 - val_accuracy: 0.9162 - val_iou: 0.4397 - val_dice_loss: 0.4033
Epoch 25/50
72/72 [==============================] - 6s 81ms/step - loss: 0.0943 - accuracy: 0.9417 - iou: 0.5508 - dice_loss: 0.2946 - val_loss: 0.1680 - val_accuracy: 0.9137 - val_iou: 0.4216 - val_dice_loss: 0.4215
Epoch 26/50
72/72 [==============================] - 6s 85ms/step - loss: 0.0866 - accuracy: 0.9455 - iou: 0.5668 - dice_loss: 0.2811 - val_loss: 0.1325 - val_accuracy: 0.9345 - val_iou: 0.4975 - val_dice_loss: 0.3492
Epoch 27/50
72/72 [==============================] - 6s 81ms/step - loss: 0.0968 - accuracy: 0.9389 - iou: 0.5545 - dice_loss: 0.2916 - val_loss: 0.1577 - val_accuracy: 0.9190 - val_iou: 0.4363 - val_dice_loss: 0.4091
Epoch 28/50
72/72 [==============================] - 6s 85ms/step - loss: 0.0897 - accuracy: 0.9442 - iou: 0.5460 - dice_loss: 0.3002 - val_loss: 0.1171 - val_accuracy: 0.9376 - val_iou: 0.5254 - val_dice_loss: 0.3241
Epoch 29/50
72/72 [==============================] - 6s 82ms/step - loss: 0.0742 - accuracy: 0.9479 - iou: 0.6482 - dice_loss: 0.2162 - val_loss: 0.1284 - val_accuracy: 0.9291 - val_iou: 0.4848 - val_dice_loss: 0.3611
Epoch 30/50
72/72 [==============================] - 6s 80ms/step - loss: 0.0823 - accuracy: 0.9471 - iou: 0.5689 - dice_loss: 0.2817 - val_loss: 0.1349 - val_accuracy: 0.9340 - val_iou: 0.4910 - val_dice_loss: 0.3539
Epoch 31/50
72/72 [==============================] - 6s 84ms/step - loss: 0.0687 - accuracy: 0.9510 - iou: 0.6345 - dice_loss: 0.2262 - val_loss: 0.1266 - val_accuracy: 0.9392 - val_iou: 0.5347 - val_dice_loss: 0.3152
Epoch 32/50
72/72 [==============================] - 6s 84ms/step - loss: 0.0723 - accuracy: 0.9491 - iou: 0.6419 - dice_loss: 0.2214 - val_loss: 0.1231 - val_accuracy: 0.9337 - val_iou: 0.5054 - val_dice_loss: 0.3394
Epoch 33/50
72/72 [==============================] - 6s 79ms/step - loss: 0.0666 - accuracy: 0.9510 - iou: 0.6525 - dice_loss: 0.2131 - val_loss: 0.1257 - val_accuracy: 0.9384 - val_iou: 0.5589 - val_dice_loss: 0.2942
Epoch 34/50
72/72 [==============================] - 6s 84ms/step - loss: 0.0678 - accuracy: 0.9505 - iou: 0.6432 - dice_loss: 0.2207 - val_loss: 0.1555 - val_accuracy: 0.9256 - val_iou: 0.4823 - val_dice_loss: 0.3633
Epoch 35/50
72/72 [==============================] - 6s 80ms/step - loss: 0.0689 - accuracy: 0.9498 - iou: 0.6583 - dice_loss: 0.2088 - val_loss: 0.1324 - val_accuracy: 0.9382 - val_iou: 0.5262 - val_dice_loss: 0.3238
Epoch 36/50
72/72 [==============================] - 6s 83ms/step - loss: 0.0607 - accuracy: 0.9533 - iou: 0.6746 - dice_loss: 0.1969 - val_loss: 0.1198 - val_accuracy: 0.9399 - val_iou: 0.5634 - val_dice_loss: 0.2893
Epoch 37/50
72/72 [==============================] - 6s 80ms/step - loss: 0.0555 - accuracy: 0.9542 - iou: 0.6988 - dice_loss: 0.1791 - val_loss: 0.1326 - val_accuracy: 0.9337 - val_iou: 0.5303 - val_dice_loss: 0.3200
Epoch 38/50
72/72 [==============================] - 6s 82ms/step - loss: 0.0573 - accuracy: 0.9538 - iou: 0.6908 - dice_loss: 0.1853 - val_loss: 0.1238 - val_accuracy: 0.9374 - val_iou: 0.5488 - val_dice_loss: 0.3009
Epoch 39/50
72/72 [==============================] - 6s 81ms/step - loss: 0.0551 - accuracy: 0.9531 - iou: 0.7097 - dice_loss: 0.1716 - val_loss: 0.1320 - val_accuracy: 0.9369 - val_iou: 0.5545 - val_dice_loss: 0.2984
Epoch 40/50
72/72 [==============================] - 6s 79ms/step - loss: 0.0571 - accuracy: 0.9547 - iou: 0.6830 - dice_loss: 0.1905 - val_loss: 0.1549 - val_accuracy: 0.9372 - val_iou: 0.5278 - val_dice_loss: 0.3205
Epoch 41/50
72/72 [==============================] - 6s 83ms/step - loss: 0.0582 - accuracy: 0.9527 - iou: 0.7001 - dice_loss: 0.1788 - val_loss: 0.1760 - val_accuracy: 0.9250 - val_iou: 0.5027 - val_dice_loss: 0.3411
Epoch 42/50
72/72 [==============================] - 6s 82ms/step - loss: 0.0546 - accuracy: 0.9547 - iou: 0.7061 - dice_loss: 0.1741 - val_loss: 0.1425 - val_accuracy: 0.9371 - val_iou: 0.5624 - val_dice_loss: 0.2899
Epoch 43/50
72/72 [==============================] - 6s 81ms/step - loss: 0.0479 - accuracy: 0.9572 - iou: 0.7252 - dice_loss: 0.1609 - val_loss: 0.1526 - val_accuracy: 0.9382 - val_iou: 0.5506 - val_dice_loss: 0.3013
Epoch 44/50
72/72 [==============================] - 6s 80ms/step - loss: 0.0504 - accuracy: 0.9564 - iou: 0.7109 - dice_loss: 0.1720 - val_loss: 0.1397 - val_accuracy: 0.9379 - val_iou: 0.5494 - val_dice_loss: 0.3027
Epoch 45/50
72/72 [==============================] - 6s 82ms/step - loss: 0.0490 - accuracy: 0.9568 - iou: 0.7278 - dice_loss: 0.1595 - val_loss: 0.1365 - val_accuracy: 0.9374 - val_iou: 0.5675 - val_dice_loss: 0.2860
Epoch 46/50
72/72 [==============================] - 6s 81ms/step - loss: 0.0487 - accuracy: 0.9554 - iou: 0.7424 - dice_loss: 0.1487 - val_loss: 0.1561 - val_accuracy: 0.9365 - val_iou: 0.5718 - val_dice_loss: 0.2833
Epoch 47/50
72/72 [==============================] - 6s 83ms/step - loss: 0.0468 - accuracy: 0.9570 - iou: 0.7260 - dice_loss: 0.1619 - val_loss: 0.1612 - val_accuracy: 0.9386 - val_iou: 0.5700 - val_dice_loss: 0.2838
Epoch 48/50
72/72 [==============================] - 6s 84ms/step - loss: 0.0452 - accuracy: 0.9575 - iou: 0.7395 - dice_loss: 0.1512 - val_loss: 0.1362 - val_accuracy: 0.9360 - val_iou: 0.5648 - val_dice_loss: 0.2891
Epoch 49/50
72/72 [==============================] - 6s 79ms/step - loss: 0.0441 - accuracy: 0.9575 - iou: 0.7472 - dice_loss: 0.1464 - val_loss: 0.1370 - val_accuracy: 0.9319 - val_iou: 0.5483 - val_dice_loss: 0.3047
Epoch 50/50
72/72 [==============================] - 6s 84ms/step - loss: 0.0431 - accuracy: 0.9582 - iou: 0.7502 - dice_loss: 0.1445 - val_loss: 0.1449 - val_accuracy: 0.9408 - val_iou: 0.5827 - val_dice_loss: 0.2723

Model Performence¶

In [ ]:
plt.figure(figsize=(20,5))

plt.subplot(1,4,1)
plt.title("Model Loss")
plt.plot(history.history['loss'], label="Training")
plt.plot(history.history['val_loss'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,2)
plt.title("Model Accuracy")
plt.plot(history.history['accuracy'], label="Training")
plt.plot(history.history['val_accuracy'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,3)
plt.title("IOU")
plt.plot(history.history['iou'], label="Training")
plt.plot(history.history['val_iou'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,4)
plt.title("Model Dice Loss")
plt.plot(history.history['dice_loss'], label="Training")
plt.plot(history.history['val_dice_loss'], label="Validation")
plt.legend()
plt.grid()

plt.show()

Predictions¶

In [ ]:
mask_pred = att_model.predict(scan_test)
3/3 [==============================] - 3s 29ms/step
In [ ]:
plt.figure(figsize=(20,25))
n=0
for i in range(1,(5*3)+1):
    plt.subplot(5,3,i)
    if n==0:
        id = np.random.randint(len(scan))
        image = scan[id]
        m = mask[id]
        pred_mask = att_model.predict(image[np.newaxis,...])

        plt.title("Original Mask")
        show_mask(image, m)
        n+=1
    elif n==1:
        plt.title("Predicted Mask")
        show_mask(image, pred_mask)
        n+=1
    elif n==2:
        pred_mask = (pred_mask>0.5).astype('float')
        plt.title("Processed Mask")
        show_mask(image, pred_mask)
        n=0
plt.tight_layout()
plt.show()
1/1 [==============================] - 0s 19ms/step
1/1 [==============================] - 0s 18ms/step
1/1 [==============================] - 0s 19ms/step
1/1 [==============================] - 0s 24ms/step
1/1 [==============================] - 0s 21ms/step

Residual Unet¶

Building the model¶

Different Blocks

In [ ]:
def bn_act(x, act=True):
    x = keras.layers.BatchNormalization()(x)
    if act == True:
        x = keras.layers.Activation("relu")(x)
    return x

def conv_block(x, filters, kernel_size=(3, 3), padding="same", strides=1):
    conv = bn_act(x)
    conv = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides)(conv)
    return conv

def stem(x, filters, kernel_size=(3, 3), padding="same", strides=1):
    conv = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
    conv = conv_block(conv, filters, kernel_size=kernel_size, padding=padding, strides=strides)

    shortcut = keras.layers.Conv2D(filters, kernel_size=(1, 1), padding=padding, strides=strides)(x)
    shortcut = bn_act(shortcut, act=False)

    output = keras.layers.Add()([conv, shortcut])
    return output

def residual_block(x, filters, kernel_size=(3, 3), padding="same", strides=1):
    res = conv_block(x, filters, kernel_size=kernel_size, padding=padding, strides=strides)
    res = conv_block(res, filters, kernel_size=kernel_size, padding=padding, strides=1)

    shortcut = keras.layers.Conv2D(filters, kernel_size=(1, 1), padding=padding, strides=strides)(x)
    shortcut = bn_act(shortcut, act=False)

    output = keras.layers.Add()([shortcut, res])
    return output

def upsample_concat_block(x, xskip):
    u = keras.layers.UpSampling2D((2, 2))(x)
    c = keras.layers.Concatenate()([u, xskip])
    return c
In [ ]:
def ResUNet():
    f = [16, 32, 64, 128, 256]
    inputs = keras.layers.Input((128, 128, 1))

    ## Encoder
    e0 = inputs
    e1 = stem(e0, f[0])
    e2 = residual_block(e1, f[1], strides=2)
    e3 = residual_block(e2, f[2], strides=2)
    e4 = residual_block(e3, f[3], strides=2)
    e5 = residual_block(e4, f[4], strides=2)

    ## Bridge
    b0 = conv_block(e5, f[4], strides=1)
    b1 = conv_block(b0, f[4], strides=1)

    ## Decoder
    u1 = upsample_concat_block(b1, e4)
    d1 = residual_block(u1, f[4])

    u2 = upsample_concat_block(d1, e3)
    d2 = residual_block(u2, f[3])

    u3 = upsample_concat_block(d2, e2)
    d3 = residual_block(u3, f[2])

    u4 = upsample_concat_block(d3, e1)
    d4 = residual_block(u4, f[1])

    outputs = keras.layers.Conv2D(1, (1, 1), padding="same", activation="sigmoid")(d4)
    model = keras.models.Model(inputs, outputs)
    return model
In [ ]:
res_unet_model = ResUNet()
adam = keras.optimizers.Adam()
res_unet_model.compile(optimizer=adam, loss=BinaryCrossentropy(), metrics=['accuracy',dice_loss,iou])
res_unet_model.summary()
Model: "model_1"
__________________________________________________________________________________________________
 Layer (type)                Output Shape                 Param #   Connected to                  
==================================================================================================
 input_3 (InputLayer)        [(None, 128, 128, 1)]        0         []                            
                                                                                                  
 conv2d_50 (Conv2D)          (None, 128, 128, 16)         160       ['input_3[0][0]']             
                                                                                                  
 batch_normalization_4 (Bat  (None, 128, 128, 16)         64        ['conv2d_50[0][0]']           
 chNormalization)                                                                                 
                                                                                                  
 activation (Activation)     (None, 128, 128, 16)         0         ['batch_normalization_4[0][0]'
                                                                    ]                             
                                                                                                  
 conv2d_52 (Conv2D)          (None, 128, 128, 16)         32        ['input_3[0][0]']             
                                                                                                  
 conv2d_51 (Conv2D)          (None, 128, 128, 16)         2320      ['activation[0][0]']          
                                                                                                  
 batch_normalization_5 (Bat  (None, 128, 128, 16)         64        ['conv2d_52[0][0]']           
 chNormalization)                                                                                 
                                                                                                  
 add (Add)                   (None, 128, 128, 16)         0         ['conv2d_51[0][0]',           
                                                                     'batch_normalization_5[0][0]'
                                                                    ]                             
                                                                                                  
 batch_normalization_6 (Bat  (None, 128, 128, 16)         64        ['add[0][0]']                 
 chNormalization)                                                                                 
                                                                                                  
 activation_1 (Activation)   (None, 128, 128, 16)         0         ['batch_normalization_6[0][0]'
                                                                    ]                             
                                                                                                  
 conv2d_53 (Conv2D)          (None, 64, 64, 32)           4640      ['activation_1[0][0]']        
                                                                                                  
 batch_normalization_7 (Bat  (None, 64, 64, 32)           128       ['conv2d_53[0][0]']           
 chNormalization)                                                                                 
                                                                                                  
 conv2d_55 (Conv2D)          (None, 64, 64, 32)           544       ['add[0][0]']                 
                                                                                                  
 activation_2 (Activation)   (None, 64, 64, 32)           0         ['batch_normalization_7[0][0]'
                                                                    ]                             
                                                                                                  
 batch_normalization_8 (Bat  (None, 64, 64, 32)           128       ['conv2d_55[0][0]']           
 chNormalization)                                                                                 
                                                                                                  
 conv2d_54 (Conv2D)          (None, 64, 64, 32)           9248      ['activation_2[0][0]']        
                                                                                                  
 add_1 (Add)                 (None, 64, 64, 32)           0         ['batch_normalization_8[0][0]'
                                                                    , 'conv2d_54[0][0]']          
                                                                                                  
 batch_normalization_9 (Bat  (None, 64, 64, 32)           128       ['add_1[0][0]']               
 chNormalization)                                                                                 
                                                                                                  
 activation_3 (Activation)   (None, 64, 64, 32)           0         ['batch_normalization_9[0][0]'
                                                                    ]                             
                                                                                                  
 conv2d_56 (Conv2D)          (None, 32, 32, 64)           18496     ['activation_3[0][0]']        
                                                                                                  
 batch_normalization_10 (Ba  (None, 32, 32, 64)           256       ['conv2d_56[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_58 (Conv2D)          (None, 32, 32, 64)           2112      ['add_1[0][0]']               
                                                                                                  
 activation_4 (Activation)   (None, 32, 32, 64)           0         ['batch_normalization_10[0][0]
                                                                    ']                            
                                                                                                  
 batch_normalization_11 (Ba  (None, 32, 32, 64)           256       ['conv2d_58[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_57 (Conv2D)          (None, 32, 32, 64)           36928     ['activation_4[0][0]']        
                                                                                                  
 add_2 (Add)                 (None, 32, 32, 64)           0         ['batch_normalization_11[0][0]
                                                                    ',                            
                                                                     'conv2d_57[0][0]']           
                                                                                                  
 batch_normalization_12 (Ba  (None, 32, 32, 64)           256       ['add_2[0][0]']               
 tchNormalization)                                                                                
                                                                                                  
 activation_5 (Activation)   (None, 32, 32, 64)           0         ['batch_normalization_12[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_59 (Conv2D)          (None, 16, 16, 128)          73856     ['activation_5[0][0]']        
                                                                                                  
 batch_normalization_13 (Ba  (None, 16, 16, 128)          512       ['conv2d_59[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_61 (Conv2D)          (None, 16, 16, 128)          8320      ['add_2[0][0]']               
                                                                                                  
 activation_6 (Activation)   (None, 16, 16, 128)          0         ['batch_normalization_13[0][0]
                                                                    ']                            
                                                                                                  
 batch_normalization_14 (Ba  (None, 16, 16, 128)          512       ['conv2d_61[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_60 (Conv2D)          (None, 16, 16, 128)          147584    ['activation_6[0][0]']        
                                                                                                  
 add_3 (Add)                 (None, 16, 16, 128)          0         ['batch_normalization_14[0][0]
                                                                    ',                            
                                                                     'conv2d_60[0][0]']           
                                                                                                  
 batch_normalization_15 (Ba  (None, 16, 16, 128)          512       ['add_3[0][0]']               
 tchNormalization)                                                                                
                                                                                                  
 activation_7 (Activation)   (None, 16, 16, 128)          0         ['batch_normalization_15[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_62 (Conv2D)          (None, 8, 8, 256)            295168    ['activation_7[0][0]']        
                                                                                                  
 batch_normalization_16 (Ba  (None, 8, 8, 256)            1024      ['conv2d_62[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_64 (Conv2D)          (None, 8, 8, 256)            33024     ['add_3[0][0]']               
                                                                                                  
 activation_8 (Activation)   (None, 8, 8, 256)            0         ['batch_normalization_16[0][0]
                                                                    ']                            
                                                                                                  
 batch_normalization_17 (Ba  (None, 8, 8, 256)            1024      ['conv2d_64[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_63 (Conv2D)          (None, 8, 8, 256)            590080    ['activation_8[0][0]']        
                                                                                                  
 add_4 (Add)                 (None, 8, 8, 256)            0         ['batch_normalization_17[0][0]
                                                                    ',                            
                                                                     'conv2d_63[0][0]']           
                                                                                                  
 batch_normalization_18 (Ba  (None, 8, 8, 256)            1024      ['add_4[0][0]']               
 tchNormalization)                                                                                
                                                                                                  
 activation_9 (Activation)   (None, 8, 8, 256)            0         ['batch_normalization_18[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_65 (Conv2D)          (None, 8, 8, 256)            590080    ['activation_9[0][0]']        
                                                                                                  
 batch_normalization_19 (Ba  (None, 8, 8, 256)            1024      ['conv2d_65[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_10 (Activation)  (None, 8, 8, 256)            0         ['batch_normalization_19[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_66 (Conv2D)          (None, 8, 8, 256)            590080    ['activation_10[0][0]']       
                                                                                                  
 up_sampling2d_8 (UpSamplin  (None, 16, 16, 256)          0         ['conv2d_66[0][0]']           
 g2D)                                                                                             
                                                                                                  
 concatenate_4 (Concatenate  (None, 16, 16, 384)          0         ['up_sampling2d_8[0][0]',     
 )                                                                   'add_3[0][0]']               
                                                                                                  
 batch_normalization_20 (Ba  (None, 16, 16, 384)          1536      ['concatenate_4[0][0]']       
 tchNormalization)                                                                                
                                                                                                  
 activation_11 (Activation)  (None, 16, 16, 384)          0         ['batch_normalization_20[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_67 (Conv2D)          (None, 16, 16, 256)          884992    ['activation_11[0][0]']       
                                                                                                  
 batch_normalization_21 (Ba  (None, 16, 16, 256)          1024      ['conv2d_67[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_69 (Conv2D)          (None, 16, 16, 256)          98560     ['concatenate_4[0][0]']       
                                                                                                  
 activation_12 (Activation)  (None, 16, 16, 256)          0         ['batch_normalization_21[0][0]
                                                                    ']                            
                                                                                                  
 batch_normalization_22 (Ba  (None, 16, 16, 256)          1024      ['conv2d_69[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_68 (Conv2D)          (None, 16, 16, 256)          590080    ['activation_12[0][0]']       
                                                                                                  
 add_5 (Add)                 (None, 16, 16, 256)          0         ['batch_normalization_22[0][0]
                                                                    ',                            
                                                                     'conv2d_68[0][0]']           
                                                                                                  
 up_sampling2d_9 (UpSamplin  (None, 32, 32, 256)          0         ['add_5[0][0]']               
 g2D)                                                                                             
                                                                                                  
 concatenate_5 (Concatenate  (None, 32, 32, 320)          0         ['up_sampling2d_9[0][0]',     
 )                                                                   'add_2[0][0]']               
                                                                                                  
 batch_normalization_23 (Ba  (None, 32, 32, 320)          1280      ['concatenate_5[0][0]']       
 tchNormalization)                                                                                
                                                                                                  
 activation_13 (Activation)  (None, 32, 32, 320)          0         ['batch_normalization_23[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_70 (Conv2D)          (None, 32, 32, 128)          368768    ['activation_13[0][0]']       
                                                                                                  
 batch_normalization_24 (Ba  (None, 32, 32, 128)          512       ['conv2d_70[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_72 (Conv2D)          (None, 32, 32, 128)          41088     ['concatenate_5[0][0]']       
                                                                                                  
 activation_14 (Activation)  (None, 32, 32, 128)          0         ['batch_normalization_24[0][0]
                                                                    ']                            
                                                                                                  
 batch_normalization_25 (Ba  (None, 32, 32, 128)          512       ['conv2d_72[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_71 (Conv2D)          (None, 32, 32, 128)          147584    ['activation_14[0][0]']       
                                                                                                  
 add_6 (Add)                 (None, 32, 32, 128)          0         ['batch_normalization_25[0][0]
                                                                    ',                            
                                                                     'conv2d_71[0][0]']           
                                                                                                  
 up_sampling2d_10 (UpSampli  (None, 64, 64, 128)          0         ['add_6[0][0]']               
 ng2D)                                                                                            
                                                                                                  
 concatenate_6 (Concatenate  (None, 64, 64, 160)          0         ['up_sampling2d_10[0][0]',    
 )                                                                   'add_1[0][0]']               
                                                                                                  
 batch_normalization_26 (Ba  (None, 64, 64, 160)          640       ['concatenate_6[0][0]']       
 tchNormalization)                                                                                
                                                                                                  
 activation_15 (Activation)  (None, 64, 64, 160)          0         ['batch_normalization_26[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_73 (Conv2D)          (None, 64, 64, 64)           92224     ['activation_15[0][0]']       
                                                                                                  
 batch_normalization_27 (Ba  (None, 64, 64, 64)           256       ['conv2d_73[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_75 (Conv2D)          (None, 64, 64, 64)           10304     ['concatenate_6[0][0]']       
                                                                                                  
 activation_16 (Activation)  (None, 64, 64, 64)           0         ['batch_normalization_27[0][0]
                                                                    ']                            
                                                                                                  
 batch_normalization_28 (Ba  (None, 64, 64, 64)           256       ['conv2d_75[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_74 (Conv2D)          (None, 64, 64, 64)           36928     ['activation_16[0][0]']       
                                                                                                  
 add_7 (Add)                 (None, 64, 64, 64)           0         ['batch_normalization_28[0][0]
                                                                    ',                            
                                                                     'conv2d_74[0][0]']           
                                                                                                  
 up_sampling2d_11 (UpSampli  (None, 128, 128, 64)         0         ['add_7[0][0]']               
 ng2D)                                                                                            
                                                                                                  
 concatenate_7 (Concatenate  (None, 128, 128, 80)         0         ['up_sampling2d_11[0][0]',    
 )                                                                   'add[0][0]']                 
                                                                                                  
 batch_normalization_29 (Ba  (None, 128, 128, 80)         320       ['concatenate_7[0][0]']       
 tchNormalization)                                                                                
                                                                                                  
 activation_17 (Activation)  (None, 128, 128, 80)         0         ['batch_normalization_29[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_76 (Conv2D)          (None, 128, 128, 32)         23072     ['activation_17[0][0]']       
                                                                                                  
 batch_normalization_30 (Ba  (None, 128, 128, 32)         128       ['conv2d_76[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_78 (Conv2D)          (None, 128, 128, 32)         2592      ['concatenate_7[0][0]']       
                                                                                                  
 activation_18 (Activation)  (None, 128, 128, 32)         0         ['batch_normalization_30[0][0]
                                                                    ']                            
                                                                                                  
 batch_normalization_31 (Ba  (None, 128, 128, 32)         128       ['conv2d_78[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 conv2d_77 (Conv2D)          (None, 128, 128, 32)         9248      ['activation_18[0][0]']       
                                                                                                  
 add_8 (Add)                 (None, 128, 128, 32)         0         ['batch_normalization_31[0][0]
                                                                    ',                            
                                                                     'conv2d_77[0][0]']           
                                                                                                  
 conv2d_79 (Conv2D)          (None, 128, 128, 1)          33        ['add_8[0][0]']               
                                                                                                  
==================================================================================================
Total params: 4722737 (18.02 MB)
Trainable params: 4715441 (17.99 MB)
Non-trainable params: 7296 (28.50 KB)
__________________________________________________________________________________________________
In [ ]:
keras.utils.plot_model(res_unet_model, './resunet_model_plot.png', show_shapes = True)
Out[ ]:

Training¶

In [ ]:
checkp = ModelCheckpoint('./resunet_60_v3.h5',save_best_only = True, verbose = 1)
In [ ]:
scan_train.shape
Out[ ]:
(579, 128, 128, 1)
In [ ]:
history = res_unet_model.fit(scan_train, mask_train, epochs = 60,
                         batch_size = 8, validation_data = (scan_test, mask_test), callbacks = [checkp])
Epoch 1/60
73/73 [==============================] - ETA: 0s - loss: 0.2343 - accuracy: 0.8944 - dice_loss: 0.6363 - iou: 0.2269
Epoch 1: val_loss improved from inf to 1.50301, saving model to ./resunet_60_v3.h5
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3079: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
  saving_api.save_model(
73/73 [==============================] - 43s 129ms/step - loss: 0.2343 - accuracy: 0.8944 - dice_loss: 0.6363 - iou: 0.2269 - val_loss: 1.5030 - val_accuracy: 0.4595 - val_dice_loss: 0.7876 - val_iou: 0.1214
Epoch 2/60
72/73 [============================>.] - ETA: 0s - loss: 0.1902 - accuracy: 0.9081 - dice_loss: 0.5597 - iou: 0.2870
Epoch 2: val_loss improved from 1.50301 to 0.58558, saving model to ./resunet_60_v3.h5
73/73 [==============================] - 5s 66ms/step - loss: 0.1898 - accuracy: 0.9084 - dice_loss: 0.5640 - iou: 0.2841 - val_loss: 0.5856 - val_accuracy: 0.7099 - val_dice_loss: 0.7078 - val_iou: 0.1735
Epoch 3/60
72/73 [============================>.] - ETA: 0s - loss: 0.1749 - accuracy: 0.9157 - dice_loss: 0.5220 - iou: 0.3175
Epoch 3: val_loss improved from 0.58558 to 0.25449, saving model to ./resunet_60_v3.h5
73/73 [==============================] - 5s 69ms/step - loss: 0.1747 - accuracy: 0.9158 - dice_loss: 0.5212 - iou: 0.3182 - val_loss: 0.2545 - val_accuracy: 0.8862 - val_dice_loss: 0.5784 - val_iou: 0.2712
Epoch 4/60
72/73 [============================>.] - ETA: 0s - loss: 0.1622 - accuracy: 0.9180 - dice_loss: 0.4877 - iou: 0.3492
Epoch 4: val_loss did not improve from 0.25449
73/73 [==============================] - 4s 60ms/step - loss: 0.1619 - accuracy: 0.9181 - dice_loss: 0.4867 - iou: 0.3502 - val_loss: 0.5600 - val_accuracy: 0.7861 - val_dice_loss: 0.6386 - val_iou: 0.2252
Epoch 5/60
72/73 [============================>.] - ETA: 0s - loss: 0.1406 - accuracy: 0.9263 - dice_loss: 0.4282 - iou: 0.4050
Epoch 5: val_loss did not improve from 0.25449
73/73 [==============================] - 4s 61ms/step - loss: 0.1408 - accuracy: 0.9261 - dice_loss: 0.4281 - iou: 0.4050 - val_loss: 0.3574 - val_accuracy: 0.9032 - val_dice_loss: 0.8007 - val_iou: 0.1201
Epoch 6/60
72/73 [============================>.] - ETA: 0s - loss: 0.1246 - accuracy: 0.9316 - dice_loss: 0.3761 - iou: 0.4584
Epoch 6: val_loss did not improve from 0.25449
73/73 [==============================] - 5s 64ms/step - loss: 0.1251 - accuracy: 0.9313 - dice_loss: 0.3762 - iou: 0.4582 - val_loss: 0.3732 - val_accuracy: 0.7911 - val_dice_loss: 0.6383 - val_iou: 0.2252
Epoch 7/60
72/73 [============================>.] - ETA: 0s - loss: 0.1174 - accuracy: 0.9330 - dice_loss: 0.3648 - iou: 0.4709
Epoch 7: val_loss improved from 0.25449 to 0.14026, saving model to ./resunet_60_v3.h5
73/73 [==============================] - 5s 66ms/step - loss: 0.1171 - accuracy: 0.9331 - dice_loss: 0.3628 - iou: 0.4733 - val_loss: 0.1403 - val_accuracy: 0.9250 - val_dice_loss: 0.4177 - val_iou: 0.4213
Epoch 8/60
72/73 [============================>.] - ETA: 0s - loss: 0.0942 - accuracy: 0.9425 - dice_loss: 0.2880 - iou: 0.5563
Epoch 8: val_loss improved from 0.14026 to 0.12642, saving model to ./resunet_60_v3.h5
73/73 [==============================] - 5s 68ms/step - loss: 0.0945 - accuracy: 0.9424 - dice_loss: 0.2884 - iou: 0.5558 - val_loss: 0.1264 - val_accuracy: 0.9422 - val_dice_loss: 0.3327 - val_iou: 0.5076
Epoch 9/60
72/73 [============================>.] - ETA: 0s - loss: 0.0867 - accuracy: 0.9440 - dice_loss: 0.2765 - iou: 0.5709
Epoch 9: val_loss did not improve from 0.12642
73/73 [==============================] - 5s 62ms/step - loss: 0.0869 - accuracy: 0.9438 - dice_loss: 0.2766 - iou: 0.5706 - val_loss: 0.2557 - val_accuracy: 0.8996 - val_dice_loss: 0.5102 - val_iou: 0.3298
Epoch 10/60
72/73 [============================>.] - ETA: 0s - loss: 0.0867 - accuracy: 0.9436 - dice_loss: 0.2763 - iou: 0.5728
Epoch 10: val_loss did not improve from 0.12642
73/73 [==============================] - 4s 60ms/step - loss: 0.0866 - accuracy: 0.9436 - dice_loss: 0.2748 - iou: 0.5748 - val_loss: 0.2216 - val_accuracy: 0.9053 - val_dice_loss: 0.3909 - val_iou: 0.4517
Epoch 11/60
72/73 [============================>.] - ETA: 0s - loss: 0.0722 - accuracy: 0.9496 - dice_loss: 0.2252 - iou: 0.6348
Epoch 11: val_loss did not improve from 0.12642
73/73 [==============================] - 5s 67ms/step - loss: 0.0722 - accuracy: 0.9496 - dice_loss: 0.2253 - iou: 0.6346 - val_loss: 0.2289 - val_accuracy: 0.8998 - val_dice_loss: 0.4135 - val_iou: 0.4310
Epoch 12/60
72/73 [============================>.] - ETA: 0s - loss: 0.0655 - accuracy: 0.9514 - dice_loss: 0.2028 - iou: 0.6650
Epoch 12: val_loss did not improve from 0.12642
73/73 [==============================] - 5s 65ms/step - loss: 0.0659 - accuracy: 0.9513 - dice_loss: 0.2039 - iou: 0.6636 - val_loss: 0.1285 - val_accuracy: 0.9344 - val_dice_loss: 0.3042 - val_iou: 0.5496
Epoch 13/60
72/73 [============================>.] - ETA: 0s - loss: 0.0678 - accuracy: 0.9506 - dice_loss: 0.2228 - iou: 0.6396
Epoch 13: val_loss did not improve from 0.12642
73/73 [==============================] - 4s 60ms/step - loss: 0.0722 - accuracy: 0.9497 - dice_loss: 0.2245 - iou: 0.6375 - val_loss: 0.2011 - val_accuracy: 0.9352 - val_dice_loss: 0.3389 - val_iou: 0.5158
Epoch 14/60
72/73 [============================>.] - ETA: 0s - loss: 0.1340 - accuracy: 0.9278 - dice_loss: 0.3948 - iou: 0.4404
Epoch 14: val_loss did not improve from 0.12642
73/73 [==============================] - 5s 63ms/step - loss: 0.1336 - accuracy: 0.9279 - dice_loss: 0.3938 - iou: 0.4414 - val_loss: 0.2444 - val_accuracy: 0.9070 - val_dice_loss: 0.5810 - val_iou: 0.2778
Epoch 15/60
72/73 [============================>.] - ETA: 0s - loss: 0.0731 - accuracy: 0.9487 - dice_loss: 0.2324 - iou: 0.6258
Epoch 15: val_loss did not improve from 0.12642
73/73 [==============================] - 4s 61ms/step - loss: 0.0729 - accuracy: 0.9487 - dice_loss: 0.2311 - iou: 0.6276 - val_loss: 0.1293 - val_accuracy: 0.9339 - val_dice_loss: 0.3114 - val_iou: 0.5408
Epoch 16/60
72/73 [============================>.] - ETA: 0s - loss: 0.0581 - accuracy: 0.9536 - dice_loss: 0.1845 - iou: 0.6904
Epoch 16: val_loss did not improve from 0.12642
73/73 [==============================] - 4s 60ms/step - loss: 0.0584 - accuracy: 0.9535 - dice_loss: 0.1893 - iou: 0.6850 - val_loss: 0.1494 - val_accuracy: 0.9242 - val_dice_loss: 0.3363 - val_iou: 0.5124
Epoch 17/60
72/73 [============================>.] - ETA: 0s - loss: 0.0558 - accuracy: 0.9545 - dice_loss: 0.1802 - iou: 0.6968
Epoch 17: val_loss did not improve from 0.12642
73/73 [==============================] - 5s 63ms/step - loss: 0.0558 - accuracy: 0.9545 - dice_loss: 0.1822 - iou: 0.6942 - val_loss: 0.1489 - val_accuracy: 0.9364 - val_dice_loss: 0.2990 - val_iou: 0.5579
Epoch 18/60
72/73 [============================>.] - ETA: 0s - loss: 0.0505 - accuracy: 0.9563 - dice_loss: 0.1624 - iou: 0.7223
Epoch 18: val_loss did not improve from 0.12642
73/73 [==============================] - 5s 62ms/step - loss: 0.0503 - accuracy: 0.9564 - dice_loss: 0.1622 - iou: 0.7226 - val_loss: 0.1480 - val_accuracy: 0.9371 - val_dice_loss: 0.3076 - val_iou: 0.5450
Epoch 19/60
72/73 [============================>.] - ETA: 0s - loss: 0.0460 - accuracy: 0.9578 - dice_loss: 0.1497 - iou: 0.7414
Epoch 19: val_loss did not improve from 0.12642
73/73 [==============================] - 4s 60ms/step - loss: 0.0461 - accuracy: 0.9577 - dice_loss: 0.1495 - iou: 0.7417 - val_loss: 0.1524 - val_accuracy: 0.9364 - val_dice_loss: 0.2823 - val_iou: 0.5744
Epoch 20/60
72/73 [============================>.] - ETA: 0s - loss: 0.0435 - accuracy: 0.9584 - dice_loss: 0.1408 - iou: 0.7543
Epoch 20: val_loss improved from 0.12642 to 0.12471, saving model to ./resunet_60_v3.h5
73/73 [==============================] - 5s 71ms/step - loss: 0.0436 - accuracy: 0.9584 - dice_loss: 0.1412 - iou: 0.7536 - val_loss: 0.1247 - val_accuracy: 0.9395 - val_dice_loss: 0.2725 - val_iou: 0.5898
Epoch 21/60
73/73 [==============================] - ETA: 0s - loss: 0.0394 - accuracy: 0.9596 - dice_loss: 0.1260 - iou: 0.7769
Epoch 21: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 68ms/step - loss: 0.0394 - accuracy: 0.9596 - dice_loss: 0.1260 - iou: 0.7769 - val_loss: 0.1604 - val_accuracy: 0.9359 - val_dice_loss: 0.2806 - val_iou: 0.5832
Epoch 22/60
72/73 [============================>.] - ETA: 0s - loss: 0.0366 - accuracy: 0.9604 - dice_loss: 0.1203 - iou: 0.7863
Epoch 22: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 60ms/step - loss: 0.0365 - accuracy: 0.9605 - dice_loss: 0.1200 - iou: 0.7867 - val_loss: 0.1647 - val_accuracy: 0.9342 - val_dice_loss: 0.2854 - val_iou: 0.5768
Epoch 23/60
72/73 [============================>.] - ETA: 0s - loss: 0.0345 - accuracy: 0.9611 - dice_loss: 0.1133 - iou: 0.7975
Epoch 23: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 61ms/step - loss: 0.0346 - accuracy: 0.9610 - dice_loss: 0.1132 - iou: 0.7977 - val_loss: 0.1521 - val_accuracy: 0.9406 - val_dice_loss: 0.2678 - val_iou: 0.5958
Epoch 24/60
72/73 [============================>.] - ETA: 0s - loss: 0.0310 - accuracy: 0.9622 - dice_loss: 0.1036 - iou: 0.8130
Epoch 24: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0312 - accuracy: 0.9620 - dice_loss: 0.1033 - iou: 0.8134 - val_loss: 0.1587 - val_accuracy: 0.9369 - val_dice_loss: 0.2679 - val_iou: 0.5923
Epoch 25/60
72/73 [============================>.] - ETA: 0s - loss: 0.0309 - accuracy: 0.9620 - dice_loss: 0.1003 - iou: 0.8184
Epoch 25: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 60ms/step - loss: 0.0309 - accuracy: 0.9620 - dice_loss: 0.1005 - iou: 0.8181 - val_loss: 0.1876 - val_accuracy: 0.9373 - val_dice_loss: 0.2649 - val_iou: 0.5989
Epoch 26/60
72/73 [============================>.] - ETA: 0s - loss: 0.0338 - accuracy: 0.9613 - dice_loss: 0.1117 - iou: 0.8001
Epoch 26: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0341 - accuracy: 0.9611 - dice_loss: 0.1118 - iou: 0.8000 - val_loss: 0.1454 - val_accuracy: 0.9361 - val_dice_loss: 0.2751 - val_iou: 0.5861
Epoch 27/60
72/73 [============================>.] - ETA: 0s - loss: 0.0333 - accuracy: 0.9615 - dice_loss: 0.1068 - iou: 0.8084
Epoch 27: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0332 - accuracy: 0.9615 - dice_loss: 0.1064 - iou: 0.8091 - val_loss: 0.1736 - val_accuracy: 0.9317 - val_dice_loss: 0.2816 - val_iou: 0.5788
Epoch 28/60
72/73 [============================>.] - ETA: 0s - loss: 0.0309 - accuracy: 0.9621 - dice_loss: 0.1002 - iou: 0.8186
Epoch 28: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 61ms/step - loss: 0.0308 - accuracy: 0.9622 - dice_loss: 0.1009 - iou: 0.8173 - val_loss: 0.1668 - val_accuracy: 0.9384 - val_dice_loss: 0.2479 - val_iou: 0.6209
Epoch 29/60
72/73 [============================>.] - ETA: 0s - loss: 0.0265 - accuracy: 0.9634 - dice_loss: 0.0889 - iou: 0.8375
Epoch 29: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0265 - accuracy: 0.9634 - dice_loss: 0.0888 - iou: 0.8376 - val_loss: 0.1602 - val_accuracy: 0.9374 - val_dice_loss: 0.2470 - val_iou: 0.6218
Epoch 30/60
72/73 [============================>.] - ETA: 0s - loss: 0.0257 - accuracy: 0.9635 - dice_loss: 0.0824 - iou: 0.8481
Epoch 30: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0256 - accuracy: 0.9637 - dice_loss: 0.0828 - iou: 0.8473 - val_loss: 0.1656 - val_accuracy: 0.9409 - val_dice_loss: 0.2458 - val_iou: 0.6233
Epoch 31/60
72/73 [============================>.] - ETA: 0s - loss: 0.0240 - accuracy: 0.9641 - dice_loss: 0.0789 - iou: 0.8540
Epoch 31: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 60ms/step - loss: 0.0239 - accuracy: 0.9642 - dice_loss: 0.0785 - iou: 0.8547 - val_loss: 0.1833 - val_accuracy: 0.9373 - val_dice_loss: 0.2461 - val_iou: 0.6239
Epoch 32/60
72/73 [============================>.] - ETA: 0s - loss: 0.0221 - accuracy: 0.9647 - dice_loss: 0.0714 - iou: 0.8669
Epoch 32: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 62ms/step - loss: 0.0221 - accuracy: 0.9647 - dice_loss: 0.0720 - iou: 0.8660 - val_loss: 0.1919 - val_accuracy: 0.9399 - val_dice_loss: 0.2376 - val_iou: 0.6352
Epoch 33/60
72/73 [============================>.] - ETA: 0s - loss: 0.0208 - accuracy: 0.9652 - dice_loss: 0.0693 - iou: 0.8708
Epoch 33: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0209 - accuracy: 0.9651 - dice_loss: 0.0692 - iou: 0.8710 - val_loss: 0.1942 - val_accuracy: 0.9410 - val_dice_loss: 0.2364 - val_iou: 0.6377
Epoch 34/60
72/73 [============================>.] - ETA: 0s - loss: 0.0201 - accuracy: 0.9653 - dice_loss: 0.0643 - iou: 0.8794
Epoch 34: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 60ms/step - loss: 0.0201 - accuracy: 0.9654 - dice_loss: 0.0643 - iou: 0.8794 - val_loss: 0.2160 - val_accuracy: 0.9380 - val_dice_loss: 0.2423 - val_iou: 0.6284
Epoch 35/60
72/73 [============================>.] - ETA: 0s - loss: 0.0206 - accuracy: 0.9652 - dice_loss: 0.0666 - iou: 0.8753
Epoch 35: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0206 - accuracy: 0.9652 - dice_loss: 0.0664 - iou: 0.8757 - val_loss: 0.2242 - val_accuracy: 0.9369 - val_dice_loss: 0.2525 - val_iou: 0.6157
Epoch 36/60
72/73 [============================>.] - ETA: 0s - loss: 0.0198 - accuracy: 0.9653 - dice_loss: 0.0643 - iou: 0.8794
Epoch 36: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0197 - accuracy: 0.9654 - dice_loss: 0.0654 - iou: 0.8775 - val_loss: 0.2115 - val_accuracy: 0.9385 - val_dice_loss: 0.2376 - val_iou: 0.6346
Epoch 37/60
72/73 [============================>.] - ETA: 0s - loss: 0.0197 - accuracy: 0.9654 - dice_loss: 0.0635 - iou: 0.8807
Epoch 37: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 60ms/step - loss: 0.0196 - accuracy: 0.9654 - dice_loss: 0.0634 - iou: 0.8809 - val_loss: 0.2195 - val_accuracy: 0.9375 - val_dice_loss: 0.2508 - val_iou: 0.6192
Epoch 38/60
72/73 [============================>.] - ETA: 0s - loss: 0.0190 - accuracy: 0.9656 - dice_loss: 0.0615 - iou: 0.8842
Epoch 38: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0190 - accuracy: 0.9656 - dice_loss: 0.0616 - iou: 0.8841 - val_loss: 0.1980 - val_accuracy: 0.9371 - val_dice_loss: 0.2398 - val_iou: 0.6251
Epoch 39/60
72/73 [============================>.] - ETA: 0s - loss: 0.0218 - accuracy: 0.9649 - dice_loss: 0.0690 - iou: 0.8712
Epoch 39: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0220 - accuracy: 0.9647 - dice_loss: 0.0697 - iou: 0.8700 - val_loss: 0.2786 - val_accuracy: 0.9346 - val_dice_loss: 0.2870 - val_iou: 0.5738
Epoch 40/60
72/73 [============================>.] - ETA: 0s - loss: 0.0722 - accuracy: 0.9493 - dice_loss: 0.2121 - iou: 0.6562
Epoch 40: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 60ms/step - loss: 0.0721 - accuracy: 0.9494 - dice_loss: 0.2115 - iou: 0.6569 - val_loss: 0.3065 - val_accuracy: 0.9051 - val_dice_loss: 0.3613 - val_iou: 0.4924
Epoch 41/60
72/73 [============================>.] - ETA: 0s - loss: 0.0488 - accuracy: 0.9565 - dice_loss: 0.1553 - iou: 0.7329
Epoch 41: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 61ms/step - loss: 0.0488 - accuracy: 0.9566 - dice_loss: 0.1551 - iou: 0.7331 - val_loss: 0.1973 - val_accuracy: 0.9237 - val_dice_loss: 0.3018 - val_iou: 0.5573
Epoch 42/60
72/73 [============================>.] - ETA: 0s - loss: 0.0310 - accuracy: 0.9620 - dice_loss: 0.1013 - iou: 0.8171
Epoch 42: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0309 - accuracy: 0.9621 - dice_loss: 0.1016 - iou: 0.8165 - val_loss: 0.1500 - val_accuracy: 0.9382 - val_dice_loss: 0.2657 - val_iou: 0.6023
Epoch 43/60
72/73 [============================>.] - ETA: 0s - loss: 0.0256 - accuracy: 0.9637 - dice_loss: 0.0855 - iou: 0.8434
Epoch 43: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 61ms/step - loss: 0.0257 - accuracy: 0.9637 - dice_loss: 0.0857 - iou: 0.8430 - val_loss: 0.1604 - val_accuracy: 0.9352 - val_dice_loss: 0.2618 - val_iou: 0.6077
Epoch 44/60
72/73 [============================>.] - ETA: 0s - loss: 0.0214 - accuracy: 0.9648 - dice_loss: 0.0702 - iou: 0.8693
Epoch 44: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0213 - accuracy: 0.9649 - dice_loss: 0.0711 - iou: 0.8679 - val_loss: 0.1721 - val_accuracy: 0.9364 - val_dice_loss: 0.2761 - val_iou: 0.5865
Epoch 45/60
72/73 [============================>.] - ETA: 0s - loss: 0.0225 - accuracy: 0.9649 - dice_loss: 0.0727 - iou: 0.8652
Epoch 45: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 69ms/step - loss: 0.0225 - accuracy: 0.9649 - dice_loss: 0.0726 - iou: 0.8654 - val_loss: 0.1674 - val_accuracy: 0.9388 - val_dice_loss: 0.2471 - val_iou: 0.6245
Epoch 46/60
72/73 [============================>.] - ETA: 0s - loss: 0.0181 - accuracy: 0.9659 - dice_loss: 0.0585 - iou: 0.8897
Epoch 46: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 61ms/step - loss: 0.0181 - accuracy: 0.9660 - dice_loss: 0.0589 - iou: 0.8890 - val_loss: 0.1781 - val_accuracy: 0.9390 - val_dice_loss: 0.2466 - val_iou: 0.6276
Epoch 47/60
72/73 [============================>.] - ETA: 0s - loss: 0.0160 - accuracy: 0.9666 - dice_loss: 0.0513 - iou: 0.9025
Epoch 47: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 64ms/step - loss: 0.0159 - accuracy: 0.9666 - dice_loss: 0.0511 - iou: 0.9029 - val_loss: 0.1881 - val_accuracy: 0.9400 - val_dice_loss: 0.2443 - val_iou: 0.6284
Epoch 48/60
72/73 [============================>.] - ETA: 0s - loss: 0.0155 - accuracy: 0.9666 - dice_loss: 0.0488 - iou: 0.9071
Epoch 48: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0155 - accuracy: 0.9667 - dice_loss: 0.0488 - iou: 0.9070 - val_loss: 0.1946 - val_accuracy: 0.9387 - val_dice_loss: 0.2465 - val_iou: 0.6276
Epoch 49/60
72/73 [============================>.] - ETA: 0s - loss: 0.0146 - accuracy: 0.9670 - dice_loss: 0.0479 - iou: 0.9088
Epoch 49: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 61ms/step - loss: 0.0146 - accuracy: 0.9669 - dice_loss: 0.0478 - iou: 0.9090 - val_loss: 0.2087 - val_accuracy: 0.9388 - val_dice_loss: 0.2495 - val_iou: 0.6245
Epoch 50/60
72/73 [============================>.] - ETA: 0s - loss: 0.0146 - accuracy: 0.9669 - dice_loss: 0.0478 - iou: 0.9090
Epoch 50: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0146 - accuracy: 0.9669 - dice_loss: 0.0476 - iou: 0.9094 - val_loss: 0.1996 - val_accuracy: 0.9388 - val_dice_loss: 0.2437 - val_iou: 0.6303
Epoch 51/60
72/73 [============================>.] - ETA: 0s - loss: 0.0140 - accuracy: 0.9671 - dice_loss: 0.0451 - iou: 0.9139
Epoch 51: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0140 - accuracy: 0.9671 - dice_loss: 0.0451 - iou: 0.9138 - val_loss: 0.1952 - val_accuracy: 0.9385 - val_dice_loss: 0.2411 - val_iou: 0.6345
Epoch 52/60
72/73 [============================>.] - ETA: 0s - loss: 0.0136 - accuracy: 0.9672 - dice_loss: 0.0443 - iou: 0.9152
Epoch 52: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 61ms/step - loss: 0.0136 - accuracy: 0.9672 - dice_loss: 0.0442 - iou: 0.9154 - val_loss: 0.2055 - val_accuracy: 0.9393 - val_dice_loss: 0.2304 - val_iou: 0.6473
Epoch 53/60
72/73 [============================>.] - ETA: 0s - loss: 0.0131 - accuracy: 0.9673 - dice_loss: 0.0420 - iou: 0.9195
Epoch 53: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0131 - accuracy: 0.9673 - dice_loss: 0.0422 - iou: 0.9192 - val_loss: 0.2161 - val_accuracy: 0.9380 - val_dice_loss: 0.2408 - val_iou: 0.6362
Epoch 54/60
72/73 [============================>.] - ETA: 0s - loss: 0.0125 - accuracy: 0.9676 - dice_loss: 0.0406 - iou: 0.9220
Epoch 54: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0125 - accuracy: 0.9675 - dice_loss: 0.0405 - iou: 0.9222 - val_loss: 0.2225 - val_accuracy: 0.9398 - val_dice_loss: 0.2329 - val_iou: 0.6464
Epoch 55/60
72/73 [============================>.] - ETA: 0s - loss: 0.0125 - accuracy: 0.9675 - dice_loss: 0.0403 - iou: 0.9226
Epoch 55: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 60ms/step - loss: 0.0126 - accuracy: 0.9675 - dice_loss: 0.0403 - iou: 0.9226 - val_loss: 0.2132 - val_accuracy: 0.9384 - val_dice_loss: 0.2353 - val_iou: 0.6404
Epoch 56/60
72/73 [============================>.] - ETA: 0s - loss: 0.0121 - accuracy: 0.9676 - dice_loss: 0.0398 - iou: 0.9236
Epoch 56: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 64ms/step - loss: 0.0121 - accuracy: 0.9676 - dice_loss: 0.0396 - iou: 0.9239 - val_loss: 0.2224 - val_accuracy: 0.9388 - val_dice_loss: 0.2363 - val_iou: 0.6401
Epoch 57/60
72/73 [============================>.] - ETA: 0s - loss: 0.0118 - accuracy: 0.9678 - dice_loss: 0.0375 - iou: 0.9277
Epoch 57: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0118 - accuracy: 0.9677 - dice_loss: 0.0375 - iou: 0.9278 - val_loss: 0.2244 - val_accuracy: 0.9390 - val_dice_loss: 0.2356 - val_iou: 0.6403
Epoch 58/60
72/73 [============================>.] - ETA: 0s - loss: 0.0113 - accuracy: 0.9679 - dice_loss: 0.0363 - iou: 0.9301
Epoch 58: val_loss did not improve from 0.12471
73/73 [==============================] - 4s 61ms/step - loss: 0.0114 - accuracy: 0.9678 - dice_loss: 0.0363 - iou: 0.9300 - val_loss: 0.2267 - val_accuracy: 0.9389 - val_dice_loss: 0.2317 - val_iou: 0.6470
Epoch 59/60
72/73 [============================>.] - ETA: 0s - loss: 0.0110 - accuracy: 0.9679 - dice_loss: 0.0357 - iou: 0.9312
Epoch 59: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 63ms/step - loss: 0.0110 - accuracy: 0.9679 - dice_loss: 0.0357 - iou: 0.9311 - val_loss: 0.2268 - val_accuracy: 0.9390 - val_dice_loss: 0.2335 - val_iou: 0.6433
Epoch 60/60
72/73 [============================>.] - ETA: 0s - loss: 0.0106 - accuracy: 0.9682 - dice_loss: 0.0343 - iou: 0.9337
Epoch 60: val_loss did not improve from 0.12471
73/73 [==============================] - 5s 62ms/step - loss: 0.0107 - accuracy: 0.9680 - dice_loss: 0.0344 - iou: 0.9336 - val_loss: 0.2339 - val_accuracy: 0.9386 - val_dice_loss: 0.2313 - val_iou: 0.6462

Model performence¶

In [ ]:
plt.figure(figsize=(20,5))

plt.subplot(1,4,1)
plt.title("Model Loss")
plt.plot(history.history['loss'], label="Training")
plt.plot(history.history['val_loss'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,2)
plt.title("Model Accuracy")
plt.plot(history.history['accuracy'], label="Training")
plt.plot(history.history['val_accuracy'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,3)
plt.title("IOU")
plt.plot(history.history['iou'], label="Training")
plt.plot(history.history['val_iou'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,4)
plt.title("Model Dice Loss")
plt.plot(history.history['dice_loss'], label="Training")
plt.plot(history.history['val_dice_loss'], label="Validation")
plt.legend()
plt.grid()

plt.show()

Predictions¶

In [ ]:
res_unet_model = load_model('./resunet_60_v2.h5',custom_objects={'dice_loss': dice_loss,"iou":iou})
In [ ]:
res_unet_pred = res_unet_model.predict(scan_test)
3/3 [==============================] - 5s 33ms/step
In [ ]:
plt.figure(figsize = (10,60))

i = 0
x = 0
while i < 45 :

    plt.subplot(15,3,i+1)
    plt.imshow(scan_test[x], 'gray')
    plt.title('Real medic Image')
    plt.axis('off')

    plt.subplot(15,3,i+2)
    plt.imshow(mask_test[x], 'gray')
    plt.title('Ground Truth Img')
    plt.axis('off')

    plt.subplot(15,3,i+3)
    plt.imshow(res_unet_pred[x], 'gray')
    plt.title('Predicited Image')
    plt.axis('off')

    x += 1
    i += 3
plt.show()

SegNet¶

Building the model¶

https://github.com/kulkarnikeerti/SegNet-Semantic-Segmentation/blob/master/SegNet_Model.ipynb

In [ ]:
class MaxPoolingWithArgmax2D(Layer):
    def __init__(
            self,
            pool_size=(2, 2),
            strides=(2, 2),
            padding='same',
            **kwargs):
        super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
        self.padding = padding
        self.pool_size = pool_size
        self.strides = strides

    def call(self, inputs, **kwargs):
        padding = self.padding
        pool_size = self.pool_size
        strides = self.strides
        ksize = [1, pool_size[0], pool_size[1], 1]
        padding = padding.upper()
        strides = [1, strides[0], strides[1], 1]
        output, argmax = tf.nn.max_pool_with_argmax(
            inputs,
            ksize=ksize,
            strides=strides,
            padding=padding
        )
        argmax = K.cast(argmax, K.floatx())
        return [output, argmax]

    def compute_output_shape(self, input_shape):
        ratio = (1, 2, 2, 1)
        output_shape = [
            dim // ratio[idx] if dim is not None else None
            for idx, dim in enumerate(input_shape)]
        output_shape = tuple(output_shape)
        return [output_shape, output_shape]

    def compute_mask(self, inputs, mask=None):
        return 2 * [None]

class MaxUnpooling2D(Layer):
    def __init__(self, size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        self.size = size

    def call(self, inputs, output_shape=None):
        # one is pool and one is mask
        updates, mask = inputs[0], inputs[1]

        mask = K.cast(mask, 'int32')
        input_shape = tf.shape(updates, out_type='int32')
        if output_shape is None:
            output_shape = (
                input_shape[0],
                input_shape[1] * self.size[0],
                input_shape[2] * self.size[1],
                input_shape[3]
            )
        self.output_shape1 = output_shape

        one_like_mask = K.ones_like(mask, dtype='int32')
        batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
        batch_range = K.reshape(tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
        b = one_like_mask * batch_range

        y = mask // (output_shape[2] * output_shape[3])
        x = (mask // output_shape[3]) % output_shape[2]
        feature_range = tf.range(output_shape[3], dtype='int32')
        f = one_like_mask * feature_range

        indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, -1]))
        values = K.reshape(updates, [-1])
        ret = tf.scatter_nd(indices, values, output_shape)
        return ret

    def compute_output_shape(self, input_shape):
        mask_shape = input_shape[1]
        return (
            mask_shape[0],
            mask_shape[1] * self.size[0],
            mask_shape[2] * self.size[1],
            mask_shape[3]
        )
In [ ]:
def segnet(
        input_shape,
        n_labels,
        kernel=3,
        pool_size=(2, 2),
        output_mode="softmax"):
    # encoder
    inputs = Input(shape=input_shape)

    conv_1 = Conv2D(64, (kernel, kernel), padding="same", kernel_initializer='he_normal', name="block1_conv1")(inputs)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = Activation("relu")(conv_1)
    conv_2 = Conv2D(64, (kernel, kernel), padding="same", kernel_initializer='he_normal', name="block1_conv2")(conv_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = Activation("relu")(conv_2)
    pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size, name="block1_pool")(conv_2)

    conv_3 = Conv2D(128, (kernel, kernel), padding="same" , kernel_initializer='he_normal', name="block2_conv1")(pool_1)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation("relu")(conv_3)
    conv_4 = Conv2D(128, (kernel, kernel), padding="same", kernel_initializer='he_normal',  name="block2_conv2")(conv_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation("relu")(conv_4)

    pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size, name="block2_pool")(conv_4)

    conv_5 = Conv2D(256, (kernel, kernel), padding="same" , kernel_initializer='he_normal', name="block3_conv1")(pool_2)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation("relu")(conv_5)
    conv_6 = Conv2D(256, (kernel, kernel), padding="same" , kernel_initializer='he_normal', name="block3_conv2")(conv_5)
    conv_6 = BatchNormalization()(conv_6)
    conv_6 = Activation("relu")(conv_6)
    conv_7 = Conv2D(256, (kernel, kernel), padding="same" , kernel_initializer='he_normal', name="block3_conv3")(conv_6)
    conv_7 = BatchNormalization()(conv_7)
    conv_7 = Activation("relu")(conv_7)

    pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size, name="block3_pool")(conv_7)

    conv_8 = Conv2D(512, (kernel, kernel), padding="same", kernel_initializer='he_normal', name="block4_conv1")(pool_3)
    conv_8 = BatchNormalization()(conv_8)
    conv_8 = Activation("relu")(conv_8)
    conv_9 = Conv2D(512, (kernel, kernel), padding="same" , kernel_initializer='he_normal', name="block4_conv2")(conv_8)
    conv_9 = BatchNormalization()(conv_9)
    conv_9 = Activation("relu")(conv_9)
    conv_10 = Conv2D(512, (kernel, kernel), padding="same" , kernel_initializer='he_normal', name="block4_conv3")(conv_9)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = Activation("relu")(conv_10)

    pool_4, mask_4 = MaxPoolingWithArgmax2D(pool_size, name="block4_pool")(conv_10)

    conv_11 = Conv2D(512, (kernel, kernel), padding="same", kernel_initializer='he_normal', name="block5_conv1")(pool_4)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = Activation("relu")(conv_11)
    conv_12 = Conv2D(512, (kernel, kernel), padding="same" , kernel_initializer='he_normal',  name="block5_conv2")(conv_11)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = Activation("relu")(conv_12)
    conv_13 = Conv2D(512, (kernel, kernel), padding="same" , kernel_initializer='he_normal',  name="block5_conv3")(conv_12)
    conv_13 = BatchNormalization()(conv_13)
    conv_13 = Activation("relu")(conv_13)

    pool_5, mask_5 = MaxPoolingWithArgmax2D(pool_size, name="block5_pool")(conv_13)

    # decoder
    unpool_1 = MaxUnpooling2D(pool_size)([pool_5, mask_5])

    conv_14 = Conv2D(512, (kernel, kernel), padding="same", kernel_initializer='he_normal')(unpool_1)
    conv_14 = BatchNormalization()(conv_14)
    conv_14 = Activation("relu")(conv_14)
    conv_15 = Conv2D(512, (kernel, kernel), padding="same", kernel_initializer='he_normal')(conv_14)
    conv_15 = BatchNormalization()(conv_15)
    conv_15 = Activation("relu")(conv_15)
    conv_16 = Conv2D(512, (kernel, kernel), padding="same", kernel_initializer='he_normal')(conv_15)
    conv_16 = BatchNormalization()(conv_16)
    conv_16 = Activation("relu")(conv_16)

    unpool_2 = MaxUnpooling2D(pool_size)([conv_16, mask_4])

    conv_17 = Conv2D(512, (kernel, kernel), padding="same", kernel_initializer='he_normal')(unpool_2)
    conv_17 = BatchNormalization()(conv_17)
    conv_17 = Activation("relu")(conv_17)
    conv_18 = Conv2D(512, (kernel, kernel), padding="same", kernel_initializer='he_normal')(conv_17)
    conv_18 = BatchNormalization()(conv_18)
    conv_18 = Activation("relu")(conv_18)
    conv_19 = Conv2D(256, (kernel, kernel), padding="same", kernel_initializer='he_normal')(conv_18)
    conv_19 = BatchNormalization()(conv_19)
    conv_19 = Activation("relu")(conv_19)

    unpool_3 = MaxUnpooling2D(pool_size)([conv_19, mask_3])

    conv_20 = Conv2D(256, (kernel, kernel), padding="same", kernel_initializer='he_normal')(unpool_3)
    conv_20 = BatchNormalization()(conv_20)
    conv_20 = Activation("relu")(conv_20)
    conv_21 = Conv2D(256, (kernel, kernel), padding="same", kernel_initializer='he_normal')(conv_20)
    conv_21 = BatchNormalization()(conv_21)
    conv_21 = Activation("relu")(conv_21)
    conv_22 = Conv2D(128, (kernel, kernel), padding="same", kernel_initializer='he_normal')(conv_21)
    conv_22 = BatchNormalization()(conv_22)
    conv_22 = Activation("relu")(conv_22)

    unpool_4 = MaxUnpooling2D(pool_size)([conv_22, mask_2])

    conv_23 = Conv2D(128, (kernel, kernel), padding="same", kernel_initializer='he_normal')(unpool_4)
    conv_23 = BatchNormalization()(conv_23)
    conv_23 = Activation("relu")(conv_23)
    conv_24 = Conv2D(64, (kernel, kernel), padding="same", kernel_initializer='he_normal')(conv_23)
    conv_24 = BatchNormalization()(conv_24)
    conv_24 = Activation("relu")(conv_24)

    unpool_5 = MaxUnpooling2D(pool_size)([conv_24, mask_1])

    conv_25 = Conv2D(64, (kernel, kernel), padding="same", kernel_initializer='he_normal')(unpool_5)
    conv_25 = BatchNormalization()(conv_25)
    conv_25 = Activation("relu")(conv_25)

    conv_26 = Conv2D(1, (1, 1), padding="same", kernel_initializer='he_normal')(conv_25)
    conv_26 = BatchNormalization()(conv_26)
    outputs = Activation("sigmoid")(conv_26)

    model = Model(inputs=inputs, outputs=outputs, name="SegNet")

    return model
In [ ]:
seg_model= segnet(input_shape=(128,128,1), n_labels=2)
In [ ]:
seg_model.summary()
Model: "SegNet"
__________________________________________________________________________________________________
 Layer (type)                Output Shape                 Param #   Connected to                  
==================================================================================================
 input_4 (InputLayer)        [(None, 128, 128, 1)]        0         []                            
                                                                                                  
 block1_conv1 (Conv2D)       (None, 128, 128, 64)         640       ['input_4[0][0]']             
                                                                                                  
 batch_normalization_32 (Ba  (None, 128, 128, 64)         256       ['block1_conv1[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_19 (Activation)  (None, 128, 128, 64)         0         ['batch_normalization_32[0][0]
                                                                    ']                            
                                                                                                  
 block1_conv2 (Conv2D)       (None, 128, 128, 64)         36928     ['activation_19[0][0]']       
                                                                                                  
 batch_normalization_33 (Ba  (None, 128, 128, 64)         256       ['block1_conv2[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_20 (Activation)  (None, 128, 128, 64)         0         ['batch_normalization_33[0][0]
                                                                    ']                            
                                                                                                  
 block1_pool (MaxPoolingWit  [(None, 64, 64, 64),         0         ['activation_20[0][0]']       
 hArgmax2D)                   (None, 64, 64, 64)]                                                 
                                                                                                  
 block2_conv1 (Conv2D)       (None, 64, 64, 128)          73856     ['block1_pool[0][0]']         
                                                                                                  
 batch_normalization_34 (Ba  (None, 64, 64, 128)          512       ['block2_conv1[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_21 (Activation)  (None, 64, 64, 128)          0         ['batch_normalization_34[0][0]
                                                                    ']                            
                                                                                                  
 block2_conv2 (Conv2D)       (None, 64, 64, 128)          147584    ['activation_21[0][0]']       
                                                                                                  
 batch_normalization_35 (Ba  (None, 64, 64, 128)          512       ['block2_conv2[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_22 (Activation)  (None, 64, 64, 128)          0         ['batch_normalization_35[0][0]
                                                                    ']                            
                                                                                                  
 block2_pool (MaxPoolingWit  [(None, 32, 32, 128),        0         ['activation_22[0][0]']       
 hArgmax2D)                   (None, 32, 32, 128)]                                                
                                                                                                  
 block3_conv1 (Conv2D)       (None, 32, 32, 256)          295168    ['block2_pool[0][0]']         
                                                                                                  
 batch_normalization_36 (Ba  (None, 32, 32, 256)          1024      ['block3_conv1[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_23 (Activation)  (None, 32, 32, 256)          0         ['batch_normalization_36[0][0]
                                                                    ']                            
                                                                                                  
 block3_conv2 (Conv2D)       (None, 32, 32, 256)          590080    ['activation_23[0][0]']       
                                                                                                  
 batch_normalization_37 (Ba  (None, 32, 32, 256)          1024      ['block3_conv2[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_24 (Activation)  (None, 32, 32, 256)          0         ['batch_normalization_37[0][0]
                                                                    ']                            
                                                                                                  
 block3_conv3 (Conv2D)       (None, 32, 32, 256)          590080    ['activation_24[0][0]']       
                                                                                                  
 batch_normalization_38 (Ba  (None, 32, 32, 256)          1024      ['block3_conv3[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_25 (Activation)  (None, 32, 32, 256)          0         ['batch_normalization_38[0][0]
                                                                    ']                            
                                                                                                  
 block3_pool (MaxPoolingWit  [(None, 16, 16, 256),        0         ['activation_25[0][0]']       
 hArgmax2D)                   (None, 16, 16, 256)]                                                
                                                                                                  
 block4_conv1 (Conv2D)       (None, 16, 16, 512)          1180160   ['block3_pool[0][0]']         
                                                                                                  
 batch_normalization_39 (Ba  (None, 16, 16, 512)          2048      ['block4_conv1[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_26 (Activation)  (None, 16, 16, 512)          0         ['batch_normalization_39[0][0]
                                                                    ']                            
                                                                                                  
 block4_conv2 (Conv2D)       (None, 16, 16, 512)          2359808   ['activation_26[0][0]']       
                                                                                                  
 batch_normalization_40 (Ba  (None, 16, 16, 512)          2048      ['block4_conv2[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_27 (Activation)  (None, 16, 16, 512)          0         ['batch_normalization_40[0][0]
                                                                    ']                            
                                                                                                  
 block4_conv3 (Conv2D)       (None, 16, 16, 512)          2359808   ['activation_27[0][0]']       
                                                                                                  
 batch_normalization_41 (Ba  (None, 16, 16, 512)          2048      ['block4_conv3[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_28 (Activation)  (None, 16, 16, 512)          0         ['batch_normalization_41[0][0]
                                                                    ']                            
                                                                                                  
 block4_pool (MaxPoolingWit  [(None, 8, 8, 512),          0         ['activation_28[0][0]']       
 hArgmax2D)                   (None, 8, 8, 512)]                                                  
                                                                                                  
 block5_conv1 (Conv2D)       (None, 8, 8, 512)            2359808   ['block4_pool[0][0]']         
                                                                                                  
 batch_normalization_42 (Ba  (None, 8, 8, 512)            2048      ['block5_conv1[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_29 (Activation)  (None, 8, 8, 512)            0         ['batch_normalization_42[0][0]
                                                                    ']                            
                                                                                                  
 block5_conv2 (Conv2D)       (None, 8, 8, 512)            2359808   ['activation_29[0][0]']       
                                                                                                  
 batch_normalization_43 (Ba  (None, 8, 8, 512)            2048      ['block5_conv2[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_30 (Activation)  (None, 8, 8, 512)            0         ['batch_normalization_43[0][0]
                                                                    ']                            
                                                                                                  
 block5_conv3 (Conv2D)       (None, 8, 8, 512)            2359808   ['activation_30[0][0]']       
                                                                                                  
 batch_normalization_44 (Ba  (None, 8, 8, 512)            2048      ['block5_conv3[0][0]']        
 tchNormalization)                                                                                
                                                                                                  
 activation_31 (Activation)  (None, 8, 8, 512)            0         ['batch_normalization_44[0][0]
                                                                    ']                            
                                                                                                  
 block5_pool (MaxPoolingWit  [(None, 4, 4, 512),          0         ['activation_31[0][0]']       
 hArgmax2D)                   (None, 4, 4, 512)]                                                  
                                                                                                  
 max_unpooling2d (MaxUnpool  (None, 8, 8, 512)            0         ['block5_pool[0][0]',         
 ing2D)                                                              'block5_pool[0][1]']         
                                                                                                  
 conv2d_80 (Conv2D)          (None, 8, 8, 512)            2359808   ['max_unpooling2d[0][0]']     
                                                                                                  
 batch_normalization_45 (Ba  (None, 8, 8, 512)            2048      ['conv2d_80[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_32 (Activation)  (None, 8, 8, 512)            0         ['batch_normalization_45[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_81 (Conv2D)          (None, 8, 8, 512)            2359808   ['activation_32[0][0]']       
                                                                                                  
 batch_normalization_46 (Ba  (None, 8, 8, 512)            2048      ['conv2d_81[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_33 (Activation)  (None, 8, 8, 512)            0         ['batch_normalization_46[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_82 (Conv2D)          (None, 8, 8, 512)            2359808   ['activation_33[0][0]']       
                                                                                                  
 batch_normalization_47 (Ba  (None, 8, 8, 512)            2048      ['conv2d_82[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_34 (Activation)  (None, 8, 8, 512)            0         ['batch_normalization_47[0][0]
                                                                    ']                            
                                                                                                  
 max_unpooling2d_1 (MaxUnpo  (None, 16, 16, 512)          0         ['activation_34[0][0]',       
 oling2D)                                                            'block4_pool[0][1]']         
                                                                                                  
 conv2d_83 (Conv2D)          (None, 16, 16, 512)          2359808   ['max_unpooling2d_1[0][0]']   
                                                                                                  
 batch_normalization_48 (Ba  (None, 16, 16, 512)          2048      ['conv2d_83[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_35 (Activation)  (None, 16, 16, 512)          0         ['batch_normalization_48[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_84 (Conv2D)          (None, 16, 16, 512)          2359808   ['activation_35[0][0]']       
                                                                                                  
 batch_normalization_49 (Ba  (None, 16, 16, 512)          2048      ['conv2d_84[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_36 (Activation)  (None, 16, 16, 512)          0         ['batch_normalization_49[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_85 (Conv2D)          (None, 16, 16, 256)          1179904   ['activation_36[0][0]']       
                                                                                                  
 batch_normalization_50 (Ba  (None, 16, 16, 256)          1024      ['conv2d_85[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_37 (Activation)  (None, 16, 16, 256)          0         ['batch_normalization_50[0][0]
                                                                    ']                            
                                                                                                  
 max_unpooling2d_2 (MaxUnpo  (None, 32, 32, 256)          0         ['activation_37[0][0]',       
 oling2D)                                                            'block3_pool[0][1]']         
                                                                                                  
 conv2d_86 (Conv2D)          (None, 32, 32, 256)          590080    ['max_unpooling2d_2[0][0]']   
                                                                                                  
 batch_normalization_51 (Ba  (None, 32, 32, 256)          1024      ['conv2d_86[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_38 (Activation)  (None, 32, 32, 256)          0         ['batch_normalization_51[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_87 (Conv2D)          (None, 32, 32, 256)          590080    ['activation_38[0][0]']       
                                                                                                  
 batch_normalization_52 (Ba  (None, 32, 32, 256)          1024      ['conv2d_87[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_39 (Activation)  (None, 32, 32, 256)          0         ['batch_normalization_52[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_88 (Conv2D)          (None, 32, 32, 128)          295040    ['activation_39[0][0]']       
                                                                                                  
 batch_normalization_53 (Ba  (None, 32, 32, 128)          512       ['conv2d_88[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_40 (Activation)  (None, 32, 32, 128)          0         ['batch_normalization_53[0][0]
                                                                    ']                            
                                                                                                  
 max_unpooling2d_3 (MaxUnpo  (None, 64, 64, 128)          0         ['activation_40[0][0]',       
 oling2D)                                                            'block2_pool[0][1]']         
                                                                                                  
 conv2d_89 (Conv2D)          (None, 64, 64, 128)          147584    ['max_unpooling2d_3[0][0]']   
                                                                                                  
 batch_normalization_54 (Ba  (None, 64, 64, 128)          512       ['conv2d_89[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_41 (Activation)  (None, 64, 64, 128)          0         ['batch_normalization_54[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_90 (Conv2D)          (None, 64, 64, 64)           73792     ['activation_41[0][0]']       
                                                                                                  
 batch_normalization_55 (Ba  (None, 64, 64, 64)           256       ['conv2d_90[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_42 (Activation)  (None, 64, 64, 64)           0         ['batch_normalization_55[0][0]
                                                                    ']                            
                                                                                                  
 max_unpooling2d_4 (MaxUnpo  (None, 128, 128, 64)         0         ['activation_42[0][0]',       
 oling2D)                                                            'block1_pool[0][1]']         
                                                                                                  
 conv2d_91 (Conv2D)          (None, 128, 128, 64)         36928     ['max_unpooling2d_4[0][0]']   
                                                                                                  
 batch_normalization_56 (Ba  (None, 128, 128, 64)         256       ['conv2d_91[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_43 (Activation)  (None, 128, 128, 64)         0         ['batch_normalization_56[0][0]
                                                                    ']                            
                                                                                                  
 conv2d_92 (Conv2D)          (None, 128, 128, 1)          65        ['activation_43[0][0]']       
                                                                                                  
 batch_normalization_57 (Ba  (None, 128, 128, 1)          4         ['conv2d_92[0][0]']           
 tchNormalization)                                                                                
                                                                                                  
 activation_44 (Activation)  (None, 128, 128, 1)          0         ['batch_normalization_57[0][0]
                                                                    ']                            
                                                                                                  
==================================================================================================
Total params: 29457797 (112.37 MB)
Trainable params: 29441923 (112.31 MB)
Non-trainable params: 15874 (62.01 KB)
__________________________________________________________________________________________________
In [ ]:
#sgd = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
seg_model.compile(loss="binary_crossentropy", optimizer='adam', metrics=['acc',iou,dice_loss])
In [ ]:
keras.utils.plot_model(seg_model, './segnet_model_plot.png', show_shapes = True)
Out[ ]:

Training¶

In [ ]:
checkp = ModelCheckpoint('./segnet_200_v3.h5',save_best_only = True, verbose = 1)
In [ ]:
history =seg_model.fit(scan_train, mask_train, epochs = 200,
                         batch_size = 8, validation_data = (scan_test, mask_test), callbacks = [checkp])
Epoch 1/200
73/73 [==============================] - ETA: 0s - loss: 0.6811 - acc: 0.8228 - iou: 0.0994 - dice_loss: 0.8204
Epoch 1: val_loss improved from inf to 143.64423, saving model to ./segnet_200_v3.h5
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3079: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
  saving_api.save_model(
73/73 [==============================] - 57s 315ms/step - loss: 0.6811 - acc: 0.8228 - iou: 0.0994 - dice_loss: 0.8204 - val_loss: 143.6442 - val_acc: 0.4757 - val_iou: 0.0888 - val_dice_loss: 0.8392
Epoch 2/200
73/73 [==============================] - ETA: 0s - loss: 0.5961 - acc: 0.8428 - iou: 0.1247 - dice_loss: 0.7812
Epoch 2: val_loss improved from 143.64423 to 6.32681, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 20s 277ms/step - loss: 0.5961 - acc: 0.8428 - iou: 0.1247 - dice_loss: 0.7812 - val_loss: 6.3268 - val_acc: 0.4147 - val_iou: 0.1051 - val_dice_loss: 0.8123
Epoch 3/200
73/73 [==============================] - ETA: 0s - loss: 0.5492 - acc: 0.8669 - iou: 0.1363 - dice_loss: 0.7629
Epoch 3: val_loss improved from 6.32681 to 1.45392, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 20s 275ms/step - loss: 0.5492 - acc: 0.8669 - iou: 0.1363 - dice_loss: 0.7629 - val_loss: 1.4539 - val_acc: 0.6813 - val_iou: 0.1258 - val_dice_loss: 0.7798
Epoch 4/200
73/73 [==============================] - ETA: 0s - loss: 0.5104 - acc: 0.8837 - iou: 0.1467 - dice_loss: 0.7462
Epoch 4: val_loss improved from 1.45392 to 0.76951, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 19s 259ms/step - loss: 0.5104 - acc: 0.8837 - iou: 0.1467 - dice_loss: 0.7462 - val_loss: 0.7695 - val_acc: 0.7605 - val_iou: 0.1370 - val_dice_loss: 0.7628
Epoch 5/200
73/73 [==============================] - ETA: 0s - loss: 0.4840 - acc: 0.8915 - iou: 0.1520 - dice_loss: 0.7390
Epoch 5: val_loss did not improve from 0.76951
73/73 [==============================] - 17s 237ms/step - loss: 0.4840 - acc: 0.8915 - iou: 0.1520 - dice_loss: 0.7390 - val_loss: 1.0374 - val_acc: 0.7726 - val_iou: 0.1439 - val_dice_loss: 0.7526
Epoch 6/200
73/73 [==============================] - ETA: 0s - loss: 0.4618 - acc: 0.8981 - iou: 0.1583 - dice_loss: 0.7293
Epoch 6: val_loss improved from 0.76951 to 0.46204, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 251ms/step - loss: 0.4618 - acc: 0.8981 - iou: 0.1583 - dice_loss: 0.7293 - val_loss: 0.4620 - val_acc: 0.9047 - val_iou: 0.1396 - val_dice_loss: 0.7588
Epoch 7/200
73/73 [==============================] - ETA: 0s - loss: 0.4400 - acc: 0.9013 - iou: 0.1639 - dice_loss: 0.7218
Epoch 7: val_loss did not improve from 0.46204
73/73 [==============================] - 17s 228ms/step - loss: 0.4400 - acc: 0.9013 - iou: 0.1639 - dice_loss: 0.7218 - val_loss: 0.6035 - val_acc: 0.8601 - val_iou: 0.1547 - val_dice_loss: 0.7354
Epoch 8/200
73/73 [==============================] - ETA: 0s - loss: 0.4169 - acc: 0.9064 - iou: 0.1695 - dice_loss: 0.7134
Epoch 8: val_loss did not improve from 0.46204
73/73 [==============================] - 17s 235ms/step - loss: 0.4169 - acc: 0.9064 - iou: 0.1695 - dice_loss: 0.7134 - val_loss: 0.4630 - val_acc: 0.8805 - val_iou: 0.1676 - val_dice_loss: 0.7173
Epoch 9/200
73/73 [==============================] - ETA: 0s - loss: 0.4100 - acc: 0.9047 - iou: 0.1663 - dice_loss: 0.7182
Epoch 9: val_loss improved from 0.46204 to 0.42495, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 252ms/step - loss: 0.4100 - acc: 0.9047 - iou: 0.1663 - dice_loss: 0.7182 - val_loss: 0.4249 - val_acc: 0.8894 - val_iou: 0.1489 - val_dice_loss: 0.7439
Epoch 10/200
73/73 [==============================] - ETA: 0s - loss: 0.3833 - acc: 0.9111 - iou: 0.1784 - dice_loss: 0.7005
Epoch 10: val_loss improved from 0.42495 to 0.35793, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 20s 278ms/step - loss: 0.3833 - acc: 0.9111 - iou: 0.1784 - dice_loss: 0.7005 - val_loss: 0.3579 - val_acc: 0.9207 - val_iou: 0.1727 - val_dice_loss: 0.7101
Epoch 11/200
73/73 [==============================] - ETA: 0s - loss: 0.3648 - acc: 0.9142 - iou: 0.1834 - dice_loss: 0.6947
Epoch 11: val_loss improved from 0.35793 to 0.34955, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 19s 256ms/step - loss: 0.3648 - acc: 0.9142 - iou: 0.1834 - dice_loss: 0.6947 - val_loss: 0.3495 - val_acc: 0.9117 - val_iou: 0.1638 - val_dice_loss: 0.7226
Epoch 12/200
73/73 [==============================] - ETA: 0s - loss: 0.3554 - acc: 0.9131 - iou: 0.1883 - dice_loss: 0.6876
Epoch 12: val_loss did not improve from 0.34955
73/73 [==============================] - 17s 232ms/step - loss: 0.3554 - acc: 0.9131 - iou: 0.1883 - dice_loss: 0.6876 - val_loss: 0.6683 - val_acc: 0.8205 - val_iou: 0.1693 - val_dice_loss: 0.7144
Epoch 13/200
73/73 [==============================] - ETA: 0s - loss: 0.3381 - acc: 0.9152 - iou: 0.1961 - dice_loss: 0.6758
Epoch 13: val_loss did not improve from 0.34955
73/73 [==============================] - 18s 239ms/step - loss: 0.3381 - acc: 0.9152 - iou: 0.1961 - dice_loss: 0.6758 - val_loss: 0.5524 - val_acc: 0.8720 - val_iou: 0.1858 - val_dice_loss: 0.6905
Epoch 14/200
73/73 [==============================] - ETA: 0s - loss: 0.3130 - acc: 0.9240 - iou: 0.2105 - dice_loss: 0.6565
Epoch 14: val_loss improved from 0.34955 to 0.29458, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 243ms/step - loss: 0.3130 - acc: 0.9240 - iou: 0.2105 - dice_loss: 0.6565 - val_loss: 0.2946 - val_acc: 0.9304 - val_iou: 0.1768 - val_dice_loss: 0.7042
Epoch 15/200
73/73 [==============================] - ETA: 0s - loss: 0.3040 - acc: 0.9231 - iou: 0.2155 - dice_loss: 0.6489
Epoch 15: val_loss improved from 0.29458 to 0.27921, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 20s 277ms/step - loss: 0.3040 - acc: 0.9231 - iou: 0.2155 - dice_loss: 0.6489 - val_loss: 0.2792 - val_acc: 0.9272 - val_iou: 0.1917 - val_dice_loss: 0.6821
Epoch 16/200
73/73 [==============================] - ETA: 0s - loss: 0.2919 - acc: 0.9250 - iou: 0.2231 - dice_loss: 0.6407
Epoch 16: val_loss improved from 0.27921 to 0.27699, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 19s 257ms/step - loss: 0.2919 - acc: 0.9250 - iou: 0.2231 - dice_loss: 0.6407 - val_loss: 0.2770 - val_acc: 0.9251 - val_iou: 0.1673 - val_dice_loss: 0.7172
Epoch 17/200
73/73 [==============================] - ETA: 0s - loss: 0.2851 - acc: 0.9237 - iou: 0.2221 - dice_loss: 0.6415
Epoch 17: val_loss did not improve from 0.27699
73/73 [==============================] - 17s 228ms/step - loss: 0.2851 - acc: 0.9237 - iou: 0.2221 - dice_loss: 0.6415 - val_loss: 0.3149 - val_acc: 0.9100 - val_iou: 0.2046 - val_dice_loss: 0.6646
Epoch 18/200
73/73 [==============================] - ETA: 0s - loss: 0.2741 - acc: 0.9261 - iou: 0.2306 - dice_loss: 0.6312
Epoch 18: val_loss did not improve from 0.27699
73/73 [==============================] - 17s 232ms/step - loss: 0.2741 - acc: 0.9261 - iou: 0.2306 - dice_loss: 0.6312 - val_loss: 0.2995 - val_acc: 0.9190 - val_iou: 0.2167 - val_dice_loss: 0.6480
Epoch 19/200
73/73 [==============================] - ETA: 0s - loss: 0.2636 - acc: 0.9275 - iou: 0.2394 - dice_loss: 0.6191
Epoch 19: val_loss did not improve from 0.27699
73/73 [==============================] - 17s 230ms/step - loss: 0.2636 - acc: 0.9275 - iou: 0.2394 - dice_loss: 0.6191 - val_loss: 0.3810 - val_acc: 0.9019 - val_iou: 0.2122 - val_dice_loss: 0.6552
Epoch 20/200
73/73 [==============================] - ETA: 0s - loss: 0.2481 - acc: 0.9304 - iou: 0.2537 - dice_loss: 0.6008
Epoch 20: val_loss did not improve from 0.27699
73/73 [==============================] - 19s 258ms/step - loss: 0.2481 - acc: 0.9304 - iou: 0.2537 - dice_loss: 0.6008 - val_loss: 0.2974 - val_acc: 0.9142 - val_iou: 0.2061 - val_dice_loss: 0.6618
Epoch 21/200
73/73 [==============================] - ETA: 0s - loss: 0.2453 - acc: 0.9297 - iou: 0.2549 - dice_loss: 0.5985
Epoch 21: val_loss did not improve from 0.27699
73/73 [==============================] - 17s 229ms/step - loss: 0.2453 - acc: 0.9297 - iou: 0.2549 - dice_loss: 0.5985 - val_loss: 0.3127 - val_acc: 0.9029 - val_iou: 0.2259 - val_dice_loss: 0.6365
Epoch 22/200
73/73 [==============================] - ETA: 0s - loss: 0.2311 - acc: 0.9325 - iou: 0.2711 - dice_loss: 0.5780
Epoch 22: val_loss improved from 0.27699 to 0.24452, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 248ms/step - loss: 0.2311 - acc: 0.9325 - iou: 0.2711 - dice_loss: 0.5780 - val_loss: 0.2445 - val_acc: 0.9288 - val_iou: 0.2343 - val_dice_loss: 0.6248
Epoch 23/200
73/73 [==============================] - ETA: 0s - loss: 0.2347 - acc: 0.9293 - iou: 0.2611 - dice_loss: 0.5922
Epoch 23: val_loss did not improve from 0.24452
73/73 [==============================] - 17s 234ms/step - loss: 0.2347 - acc: 0.9293 - iou: 0.2611 - dice_loss: 0.5922 - val_loss: 0.2873 - val_acc: 0.9151 - val_iou: 0.2516 - val_dice_loss: 0.6037
Epoch 24/200
73/73 [==============================] - ETA: 0s - loss: 0.2141 - acc: 0.9351 - iou: 0.2899 - dice_loss: 0.5558
Epoch 24: val_loss improved from 0.24452 to 0.23008, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 249ms/step - loss: 0.2141 - acc: 0.9351 - iou: 0.2899 - dice_loss: 0.5558 - val_loss: 0.2301 - val_acc: 0.9317 - val_iou: 0.2487 - val_dice_loss: 0.6071
Epoch 25/200
73/73 [==============================] - ETA: 0s - loss: 0.2124 - acc: 0.9340 - iou: 0.2873 - dice_loss: 0.5595
Epoch 25: val_loss improved from 0.23008 to 0.21198, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 241ms/step - loss: 0.2124 - acc: 0.9340 - iou: 0.2873 - dice_loss: 0.5595 - val_loss: 0.2120 - val_acc: 0.9274 - val_iou: 0.2276 - val_dice_loss: 0.6368
Epoch 26/200
73/73 [==============================] - ETA: 0s - loss: 0.1974 - acc: 0.9378 - iou: 0.3101 - dice_loss: 0.5317
Epoch 26: val_loss improved from 0.21198 to 0.19089, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 243ms/step - loss: 0.1974 - acc: 0.9378 - iou: 0.3101 - dice_loss: 0.5317 - val_loss: 0.1909 - val_acc: 0.9395 - val_iou: 0.2775 - val_dice_loss: 0.5714
Epoch 27/200
73/73 [==============================] - ETA: 0s - loss: 0.1903 - acc: 0.9399 - iou: 0.3187 - dice_loss: 0.5238
Epoch 27: val_loss did not improve from 0.19089
73/73 [==============================] - 17s 239ms/step - loss: 0.1903 - acc: 0.9399 - iou: 0.3187 - dice_loss: 0.5238 - val_loss: 0.1989 - val_acc: 0.9324 - val_iou: 0.2249 - val_dice_loss: 0.6370
Epoch 28/200
73/73 [==============================] - ETA: 0s - loss: 0.1774 - acc: 0.9427 - iou: 0.3359 - dice_loss: 0.5028
Epoch 28: val_loss improved from 0.19089 to 0.18927, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 249ms/step - loss: 0.1774 - acc: 0.9427 - iou: 0.3359 - dice_loss: 0.5028 - val_loss: 0.1893 - val_acc: 0.9343 - val_iou: 0.2281 - val_dice_loss: 0.6330
Epoch 29/200
73/73 [==============================] - ETA: 0s - loss: 0.1783 - acc: 0.9412 - iou: 0.3388 - dice_loss: 0.5008
Epoch 29: val_loss did not improve from 0.18927
73/73 [==============================] - 17s 227ms/step - loss: 0.1783 - acc: 0.9412 - iou: 0.3388 - dice_loss: 0.5008 - val_loss: 0.3109 - val_acc: 0.9077 - val_iou: 0.2570 - val_dice_loss: 0.5960
Epoch 30/200
73/73 [==============================] - ETA: 0s - loss: 0.1833 - acc: 0.9380 - iou: 0.3351 - dice_loss: 0.5038
Epoch 30: val_loss did not improve from 0.18927
73/73 [==============================] - 17s 231ms/step - loss: 0.1833 - acc: 0.9380 - iou: 0.3351 - dice_loss: 0.5038 - val_loss: 0.3136 - val_acc: 0.9000 - val_iou: 0.2670 - val_dice_loss: 0.5846
Epoch 31/200
73/73 [==============================] - ETA: 0s - loss: 0.1665 - acc: 0.9427 - iou: 0.3517 - dice_loss: 0.4874
Epoch 31: val_loss improved from 0.18927 to 0.18369, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 254ms/step - loss: 0.1665 - acc: 0.9427 - iou: 0.3517 - dice_loss: 0.4874 - val_loss: 0.1837 - val_acc: 0.9318 - val_iou: 0.2774 - val_dice_loss: 0.5713
Epoch 32/200
73/73 [==============================] - ETA: 0s - loss: 0.1642 - acc: 0.9427 - iou: 0.3578 - dice_loss: 0.4805
Epoch 32: val_loss did not improve from 0.18369
73/73 [==============================] - 17s 229ms/step - loss: 0.1642 - acc: 0.9427 - iou: 0.3578 - dice_loss: 0.4805 - val_loss: 0.2056 - val_acc: 0.9269 - val_iou: 0.2902 - val_dice_loss: 0.5551
Epoch 33/200
73/73 [==============================] - ETA: 0s - loss: 0.1543 - acc: 0.9457 - iou: 0.3712 - dice_loss: 0.4662
Epoch 33: val_loss improved from 0.18369 to 0.17880, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 244ms/step - loss: 0.1543 - acc: 0.9457 - iou: 0.3712 - dice_loss: 0.4662 - val_loss: 0.1788 - val_acc: 0.9345 - val_iou: 0.3067 - val_dice_loss: 0.5344
Epoch 34/200
73/73 [==============================] - ETA: 0s - loss: 0.1570 - acc: 0.9428 - iou: 0.3707 - dice_loss: 0.4668
Epoch 34: val_loss did not improve from 0.17880
73/73 [==============================] - 17s 228ms/step - loss: 0.1570 - acc: 0.9428 - iou: 0.3707 - dice_loss: 0.4668 - val_loss: 0.1916 - val_acc: 0.9321 - val_iou: 0.2877 - val_dice_loss: 0.5589
Epoch 35/200
73/73 [==============================] - ETA: 0s - loss: 0.1450 - acc: 0.9466 - iou: 0.3917 - dice_loss: 0.4447
Epoch 35: val_loss improved from 0.17880 to 0.17182, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 253ms/step - loss: 0.1450 - acc: 0.9466 - iou: 0.3917 - dice_loss: 0.4447 - val_loss: 0.1718 - val_acc: 0.9348 - val_iou: 0.2726 - val_dice_loss: 0.5762
Epoch 36/200
73/73 [==============================] - ETA: 0s - loss: 0.1348 - acc: 0.9489 - iou: 0.4113 - dice_loss: 0.4240
Epoch 36: val_loss improved from 0.17182 to 0.17125, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 240ms/step - loss: 0.1348 - acc: 0.9489 - iou: 0.4113 - dice_loss: 0.4240 - val_loss: 0.1713 - val_acc: 0.9309 - val_iou: 0.2813 - val_dice_loss: 0.5663
Epoch 37/200
73/73 [==============================] - ETA: 0s - loss: 0.1317 - acc: 0.9495 - iou: 0.4142 - dice_loss: 0.4229
Epoch 37: val_loss improved from 0.17125 to 0.16964, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 244ms/step - loss: 0.1317 - acc: 0.9495 - iou: 0.4142 - dice_loss: 0.4229 - val_loss: 0.1696 - val_acc: 0.9311 - val_iou: 0.2620 - val_dice_loss: 0.5887
Epoch 38/200
73/73 [==============================] - ETA: 0s - loss: 0.1349 - acc: 0.9472 - iou: 0.4104 - dice_loss: 0.4253
Epoch 38: val_loss improved from 0.16964 to 0.16608, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 19s 263ms/step - loss: 0.1349 - acc: 0.9472 - iou: 0.4104 - dice_loss: 0.4253 - val_loss: 0.1661 - val_acc: 0.9359 - val_iou: 0.3186 - val_dice_loss: 0.5230
Epoch 39/200
73/73 [==============================] - ETA: 0s - loss: 0.1246 - acc: 0.9500 - iou: 0.4277 - dice_loss: 0.4104
Epoch 39: val_loss did not improve from 0.16608
73/73 [==============================] - 16s 224ms/step - loss: 0.1246 - acc: 0.9500 - iou: 0.4277 - dice_loss: 0.4104 - val_loss: 0.1665 - val_acc: 0.9350 - val_iou: 0.3267 - val_dice_loss: 0.5151
Epoch 40/200
73/73 [==============================] - ETA: 0s - loss: 0.1204 - acc: 0.9504 - iou: 0.4459 - dice_loss: 0.3898
Epoch 40: val_loss improved from 0.16608 to 0.14776, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 246ms/step - loss: 0.1204 - acc: 0.9504 - iou: 0.4459 - dice_loss: 0.3898 - val_loss: 0.1478 - val_acc: 0.9406 - val_iou: 0.3436 - val_dice_loss: 0.4941
Epoch 41/200
73/73 [==============================] - ETA: 0s - loss: 0.1083 - acc: 0.9541 - iou: 0.4779 - dice_loss: 0.3580
Epoch 41: val_loss did not improve from 0.14776
73/73 [==============================] - 17s 231ms/step - loss: 0.1083 - acc: 0.9541 - iou: 0.4779 - dice_loss: 0.3580 - val_loss: 0.1610 - val_acc: 0.9352 - val_iou: 0.3477 - val_dice_loss: 0.4916
Epoch 42/200
73/73 [==============================] - ETA: 0s - loss: 0.1099 - acc: 0.9529 - iou: 0.4724 - dice_loss: 0.3652
Epoch 42: val_loss did not improve from 0.14776
73/73 [==============================] - 18s 253ms/step - loss: 0.1099 - acc: 0.9529 - iou: 0.4724 - dice_loss: 0.3652 - val_loss: 0.1648 - val_acc: 0.9335 - val_iou: 0.3723 - val_dice_loss: 0.4646
Epoch 43/200
73/73 [==============================] - ETA: 0s - loss: 0.1139 - acc: 0.9509 - iou: 0.4568 - dice_loss: 0.3808
Epoch 43: val_loss did not improve from 0.14776
73/73 [==============================] - 16s 225ms/step - loss: 0.1139 - acc: 0.9509 - iou: 0.4568 - dice_loss: 0.3808 - val_loss: 0.1748 - val_acc: 0.9297 - val_iou: 0.3654 - val_dice_loss: 0.4716
Epoch 44/200
73/73 [==============================] - ETA: 0s - loss: 0.1068 - acc: 0.9525 - iou: 0.4763 - dice_loss: 0.3643
Epoch 44: val_loss did not improve from 0.14776
73/73 [==============================] - 16s 226ms/step - loss: 0.1068 - acc: 0.9525 - iou: 0.4763 - dice_loss: 0.3643 - val_loss: 0.1653 - val_acc: 0.9327 - val_iou: 0.3657 - val_dice_loss: 0.4708
Epoch 45/200
73/73 [==============================] - ETA: 0s - loss: 0.1078 - acc: 0.9520 - iou: 0.4751 - dice_loss: 0.3641
Epoch 45: val_loss did not improve from 0.14776
73/73 [==============================] - 16s 225ms/step - loss: 0.1078 - acc: 0.9520 - iou: 0.4751 - dice_loss: 0.3641 - val_loss: 0.1635 - val_acc: 0.9347 - val_iou: 0.3759 - val_dice_loss: 0.4603
Epoch 46/200
73/73 [==============================] - ETA: 0s - loss: 0.1006 - acc: 0.9534 - iou: 0.4947 - dice_loss: 0.3448
Epoch 46: val_loss improved from 0.14776 to 0.14272, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 19s 267ms/step - loss: 0.1006 - acc: 0.9534 - iou: 0.4947 - dice_loss: 0.3448 - val_loss: 0.1427 - val_acc: 0.9397 - val_iou: 0.3691 - val_dice_loss: 0.4662
Epoch 47/200
73/73 [==============================] - ETA: 0s - loss: 0.0891 - acc: 0.9570 - iou: 0.5260 - dice_loss: 0.3171
Epoch 47: val_loss did not improve from 0.14272
73/73 [==============================] - 17s 227ms/step - loss: 0.0891 - acc: 0.9570 - iou: 0.5260 - dice_loss: 0.3171 - val_loss: 0.1478 - val_acc: 0.9383 - val_iou: 0.3740 - val_dice_loss: 0.4615
Epoch 48/200
73/73 [==============================] - ETA: 0s - loss: 0.0854 - acc: 0.9578 - iou: 0.5368 - dice_loss: 0.3078
Epoch 48: val_loss improved from 0.14272 to 0.14140, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 247ms/step - loss: 0.0854 - acc: 0.9578 - iou: 0.5368 - dice_loss: 0.3078 - val_loss: 0.1414 - val_acc: 0.9406 - val_iou: 0.3893 - val_dice_loss: 0.4458
Epoch 49/200
73/73 [==============================] - ETA: 0s - loss: 0.0827 - acc: 0.9581 - iou: 0.5399 - dice_loss: 0.3078
Epoch 49: val_loss did not improve from 0.14140
73/73 [==============================] - 17s 236ms/step - loss: 0.0827 - acc: 0.9581 - iou: 0.5399 - dice_loss: 0.3078 - val_loss: 0.1565 - val_acc: 0.9352 - val_iou: 0.3941 - val_dice_loss: 0.4415
Epoch 50/200
73/73 [==============================] - ETA: 0s - loss: 0.0891 - acc: 0.9550 - iou: 0.5351 - dice_loss: 0.3077
Epoch 50: val_loss did not improve from 0.14140
73/73 [==============================] - 17s 232ms/step - loss: 0.0891 - acc: 0.9550 - iou: 0.5351 - dice_loss: 0.3077 - val_loss: 0.1738 - val_acc: 0.9272 - val_iou: 0.3817 - val_dice_loss: 0.4565
Epoch 51/200
73/73 [==============================] - ETA: 0s - loss: 0.0939 - acc: 0.9528 - iou: 0.5234 - dice_loss: 0.3184
Epoch 51: val_loss did not improve from 0.14140
73/73 [==============================] - 16s 225ms/step - loss: 0.0939 - acc: 0.9528 - iou: 0.5234 - dice_loss: 0.3184 - val_loss: 0.1498 - val_acc: 0.9351 - val_iou: 0.3880 - val_dice_loss: 0.4497
Epoch 52/200
73/73 [==============================] - ETA: 0s - loss: 0.0842 - acc: 0.9560 - iou: 0.5493 - dice_loss: 0.2959
Epoch 52: val_loss did not improve from 0.14140
73/73 [==============================] - 17s 232ms/step - loss: 0.0842 - acc: 0.9560 - iou: 0.5493 - dice_loss: 0.2959 - val_loss: 0.1415 - val_acc: 0.9413 - val_iou: 0.4337 - val_dice_loss: 0.4006
Epoch 53/200
73/73 [==============================] - ETA: 0s - loss: 0.0754 - acc: 0.9588 - iou: 0.5662 - dice_loss: 0.2839
Epoch 53: val_loss improved from 0.14140 to 0.14130, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 21s 292ms/step - loss: 0.0754 - acc: 0.9588 - iou: 0.5662 - dice_loss: 0.2839 - val_loss: 0.1413 - val_acc: 0.9383 - val_iou: 0.4083 - val_dice_loss: 0.4276
Epoch 54/200
73/73 [==============================] - ETA: 0s - loss: 0.0727 - acc: 0.9592 - iou: 0.5771 - dice_loss: 0.2766
Epoch 54: val_loss improved from 0.14130 to 0.14079, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 245ms/step - loss: 0.0727 - acc: 0.9592 - iou: 0.5771 - dice_loss: 0.2766 - val_loss: 0.1408 - val_acc: 0.9392 - val_iou: 0.4173 - val_dice_loss: 0.4194
Epoch 55/200
73/73 [==============================] - ETA: 0s - loss: 0.0696 - acc: 0.9598 - iou: 0.5905 - dice_loss: 0.2634
Epoch 55: val_loss improved from 0.14079 to 0.13879, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 241ms/step - loss: 0.0696 - acc: 0.9598 - iou: 0.5905 - dice_loss: 0.2634 - val_loss: 0.1388 - val_acc: 0.9393 - val_iou: 0.4140 - val_dice_loss: 0.4235
Epoch 56/200
73/73 [==============================] - ETA: 0s - loss: 0.0670 - acc: 0.9600 - iou: 0.6039 - dice_loss: 0.2515
Epoch 56: val_loss did not improve from 0.13879
73/73 [==============================] - 18s 250ms/step - loss: 0.0670 - acc: 0.9600 - iou: 0.6039 - dice_loss: 0.2515 - val_loss: 0.1446 - val_acc: 0.9371 - val_iou: 0.4396 - val_dice_loss: 0.3986
Epoch 57/200
73/73 [==============================] - ETA: 0s - loss: 0.0671 - acc: 0.9596 - iou: 0.6069 - dice_loss: 0.2501
Epoch 57: val_loss improved from 0.13879 to 0.12909, saving model to ./segnet_200_v3.h5
73/73 [==============================] - 18s 243ms/step - loss: 0.0671 - acc: 0.9596 - iou: 0.6069 - dice_loss: 0.2501 - val_loss: 0.1291 - val_acc: 0.9422 - val_iou: 0.4314 - val_dice_loss: 0.4036
Epoch 58/200
73/73 [==============================] - ETA: 0s - loss: 0.0961 - acc: 0.9490 - iou: 0.5308 - dice_loss: 0.3131
Epoch 58: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0961 - acc: 0.9490 - iou: 0.5308 - dice_loss: 0.3131 - val_loss: 0.7975 - val_acc: 0.8082 - val_iou: 0.2389 - val_dice_loss: 0.6206
Epoch 59/200
73/73 [==============================] - ETA: 0s - loss: 0.1038 - acc: 0.9459 - iou: 0.5069 - dice_loss: 0.3360
Epoch 59: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.1038 - acc: 0.9459 - iou: 0.5069 - dice_loss: 0.3360 - val_loss: 0.7007 - val_acc: 0.8457 - val_iou: 0.2949 - val_dice_loss: 0.5520
Epoch 60/200
73/73 [==============================] - ETA: 0s - loss: 0.0834 - acc: 0.9532 - iou: 0.5527 - dice_loss: 0.2948
Epoch 60: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 238ms/step - loss: 0.0834 - acc: 0.9532 - iou: 0.5527 - dice_loss: 0.2948 - val_loss: 0.1683 - val_acc: 0.9330 - val_iou: 0.4452 - val_dice_loss: 0.3977
Epoch 61/200
73/73 [==============================] - ETA: 0s - loss: 0.0669 - acc: 0.9585 - iou: 0.6087 - dice_loss: 0.2484
Epoch 61: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0669 - acc: 0.9585 - iou: 0.6087 - dice_loss: 0.2484 - val_loss: 0.1515 - val_acc: 0.9314 - val_iou: 0.4268 - val_dice_loss: 0.4135
Epoch 62/200
73/73 [==============================] - ETA: 0s - loss: 0.0597 - acc: 0.9606 - iou: 0.6316 - dice_loss: 0.2307
Epoch 62: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0597 - acc: 0.9606 - iou: 0.6316 - dice_loss: 0.2307 - val_loss: 0.1393 - val_acc: 0.9389 - val_iou: 0.4272 - val_dice_loss: 0.4097
Epoch 63/200
73/73 [==============================] - ETA: 0s - loss: 0.0563 - acc: 0.9614 - iou: 0.6467 - dice_loss: 0.2185
Epoch 63: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0563 - acc: 0.9614 - iou: 0.6467 - dice_loss: 0.2185 - val_loss: 0.1366 - val_acc: 0.9371 - val_iou: 0.4422 - val_dice_loss: 0.3976
Epoch 64/200
73/73 [==============================] - ETA: 0s - loss: 0.0528 - acc: 0.9623 - iou: 0.6625 - dice_loss: 0.2074
Epoch 64: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 235ms/step - loss: 0.0528 - acc: 0.9623 - iou: 0.6625 - dice_loss: 0.2074 - val_loss: 0.1450 - val_acc: 0.9360 - val_iou: 0.4424 - val_dice_loss: 0.3953
Epoch 65/200
73/73 [==============================] - ETA: 0s - loss: 0.0549 - acc: 0.9615 - iou: 0.6533 - dice_loss: 0.2158
Epoch 65: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 236ms/step - loss: 0.0549 - acc: 0.9615 - iou: 0.6533 - dice_loss: 0.2158 - val_loss: 0.1717 - val_acc: 0.9328 - val_iou: 0.4668 - val_dice_loss: 0.3717
Epoch 66/200
73/73 [==============================] - ETA: 0s - loss: 0.0530 - acc: 0.9618 - iou: 0.6634 - dice_loss: 0.2072
Epoch 66: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0530 - acc: 0.9618 - iou: 0.6634 - dice_loss: 0.2072 - val_loss: 0.1464 - val_acc: 0.9376 - val_iou: 0.4619 - val_dice_loss: 0.3764
Epoch 67/200
73/73 [==============================] - ETA: 0s - loss: 0.0486 - acc: 0.9629 - iou: 0.6742 - dice_loss: 0.1995
Epoch 67: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0486 - acc: 0.9629 - iou: 0.6742 - dice_loss: 0.1995 - val_loss: 0.1377 - val_acc: 0.9388 - val_iou: 0.4520 - val_dice_loss: 0.3863
Epoch 68/200
73/73 [==============================] - ETA: 0s - loss: 0.0473 - acc: 0.9631 - iou: 0.6846 - dice_loss: 0.1908
Epoch 68: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0473 - acc: 0.9631 - iou: 0.6846 - dice_loss: 0.1908 - val_loss: 0.1473 - val_acc: 0.9375 - val_iou: 0.4472 - val_dice_loss: 0.3921
Epoch 69/200
73/73 [==============================] - ETA: 0s - loss: 0.0508 - acc: 0.9617 - iou: 0.6794 - dice_loss: 0.1942
Epoch 69: val_loss did not improve from 0.12909
73/73 [==============================] - 19s 255ms/step - loss: 0.0508 - acc: 0.9617 - iou: 0.6794 - dice_loss: 0.1942 - val_loss: 0.1494 - val_acc: 0.9359 - val_iou: 0.4663 - val_dice_loss: 0.3734
Epoch 70/200
73/73 [==============================] - ETA: 0s - loss: 0.0450 - acc: 0.9632 - iou: 0.7022 - dice_loss: 0.1784
Epoch 70: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0450 - acc: 0.9632 - iou: 0.7022 - dice_loss: 0.1784 - val_loss: 0.1646 - val_acc: 0.9338 - val_iou: 0.4961 - val_dice_loss: 0.3474
Epoch 71/200
73/73 [==============================] - ETA: 0s - loss: 0.0416 - acc: 0.9643 - iou: 0.7094 - dice_loss: 0.1762
Epoch 71: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0416 - acc: 0.9643 - iou: 0.7094 - dice_loss: 0.1762 - val_loss: 0.1465 - val_acc: 0.9366 - val_iou: 0.4845 - val_dice_loss: 0.3560
Epoch 72/200
73/73 [==============================] - ETA: 0s - loss: 0.0410 - acc: 0.9642 - iou: 0.7173 - dice_loss: 0.1685
Epoch 72: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 226ms/step - loss: 0.0410 - acc: 0.9642 - iou: 0.7173 - dice_loss: 0.1685 - val_loss: 0.1426 - val_acc: 0.9391 - val_iou: 0.4964 - val_dice_loss: 0.3452
Epoch 73/200
73/73 [==============================] - ETA: 0s - loss: 0.0406 - acc: 0.9642 - iou: 0.7162 - dice_loss: 0.1703
Epoch 73: val_loss did not improve from 0.12909
73/73 [==============================] - 19s 256ms/step - loss: 0.0406 - acc: 0.9642 - iou: 0.7162 - dice_loss: 0.1703 - val_loss: 0.1380 - val_acc: 0.9407 - val_iou: 0.5013 - val_dice_loss: 0.3412
Epoch 74/200
73/73 [==============================] - ETA: 0s - loss: 0.0408 - acc: 0.9640 - iou: 0.7237 - dice_loss: 0.1638
Epoch 74: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0408 - acc: 0.9640 - iou: 0.7237 - dice_loss: 0.1638 - val_loss: 0.1371 - val_acc: 0.9408 - val_iou: 0.5083 - val_dice_loss: 0.3366
Epoch 75/200
73/73 [==============================] - ETA: 0s - loss: 0.0383 - acc: 0.9645 - iou: 0.7344 - dice_loss: 0.1563
Epoch 75: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0383 - acc: 0.9645 - iou: 0.7344 - dice_loss: 0.1563 - val_loss: 0.1419 - val_acc: 0.9397 - val_iou: 0.5027 - val_dice_loss: 0.3410
Epoch 76/200
73/73 [==============================] - ETA: 0s - loss: 0.0361 - acc: 0.9649 - iou: 0.7448 - dice_loss: 0.1490
Epoch 76: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 229ms/step - loss: 0.0361 - acc: 0.9649 - iou: 0.7448 - dice_loss: 0.1490 - val_loss: 0.1402 - val_acc: 0.9406 - val_iou: 0.5086 - val_dice_loss: 0.3363
Epoch 77/200
73/73 [==============================] - ETA: 0s - loss: 0.0395 - acc: 0.9639 - iou: 0.7271 - dice_loss: 0.1625
Epoch 77: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 238ms/step - loss: 0.0395 - acc: 0.9639 - iou: 0.7271 - dice_loss: 0.1625 - val_loss: 0.1528 - val_acc: 0.9365 - val_iou: 0.4974 - val_dice_loss: 0.3464
Epoch 78/200
73/73 [==============================] - ETA: 0s - loss: 0.0366 - acc: 0.9645 - iou: 0.7440 - dice_loss: 0.1506
Epoch 78: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0366 - acc: 0.9645 - iou: 0.7440 - dice_loss: 0.1506 - val_loss: 0.1665 - val_acc: 0.9360 - val_iou: 0.5048 - val_dice_loss: 0.3416
Epoch 79/200
73/73 [==============================] - ETA: 0s - loss: 0.0322 - acc: 0.9656 - iou: 0.7692 - dice_loss: 0.1324
Epoch 79: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0322 - acc: 0.9656 - iou: 0.7692 - dice_loss: 0.1324 - val_loss: 0.1551 - val_acc: 0.9368 - val_iou: 0.5008 - val_dice_loss: 0.3453
Epoch 80/200
73/73 [==============================] - ETA: 0s - loss: 0.0318 - acc: 0.9657 - iou: 0.7705 - dice_loss: 0.1333
Epoch 80: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0318 - acc: 0.9657 - iou: 0.7705 - dice_loss: 0.1333 - val_loss: 0.1587 - val_acc: 0.9364 - val_iou: 0.5360 - val_dice_loss: 0.3117
Epoch 81/200
73/73 [==============================] - ETA: 0s - loss: 0.0340 - acc: 0.9648 - iou: 0.7598 - dice_loss: 0.1387
Epoch 81: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 237ms/step - loss: 0.0340 - acc: 0.9648 - iou: 0.7598 - dice_loss: 0.1387 - val_loss: 0.1562 - val_acc: 0.9372 - val_iou: 0.5186 - val_dice_loss: 0.3287
Epoch 82/200
73/73 [==============================] - ETA: 0s - loss: 0.0318 - acc: 0.9653 - iou: 0.7757 - dice_loss: 0.1280
Epoch 82: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 238ms/step - loss: 0.0318 - acc: 0.9653 - iou: 0.7757 - dice_loss: 0.1280 - val_loss: 0.1716 - val_acc: 0.9348 - val_iou: 0.5086 - val_dice_loss: 0.3394
Epoch 83/200
73/73 [==============================] - ETA: 0s - loss: 0.0317 - acc: 0.9654 - iou: 0.7732 - dice_loss: 0.1304
Epoch 83: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0317 - acc: 0.9654 - iou: 0.7732 - dice_loss: 0.1304 - val_loss: 0.1486 - val_acc: 0.9404 - val_iou: 0.5282 - val_dice_loss: 0.3197
Epoch 84/200
73/73 [==============================] - ETA: 0s - loss: 0.0320 - acc: 0.9651 - iou: 0.7723 - dice_loss: 0.1314
Epoch 84: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 226ms/step - loss: 0.0320 - acc: 0.9651 - iou: 0.7723 - dice_loss: 0.1314 - val_loss: 0.1784 - val_acc: 0.9329 - val_iou: 0.5284 - val_dice_loss: 0.3205
Epoch 85/200
73/73 [==============================] - ETA: 0s - loss: 0.0332 - acc: 0.9646 - iou: 0.7678 - dice_loss: 0.1348
Epoch 85: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0332 - acc: 0.9646 - iou: 0.7678 - dice_loss: 0.1348 - val_loss: 0.1641 - val_acc: 0.9355 - val_iou: 0.5247 - val_dice_loss: 0.3247
Epoch 86/200
73/73 [==============================] - ETA: 0s - loss: 0.0330 - acc: 0.9645 - iou: 0.7768 - dice_loss: 0.1281
Epoch 86: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 238ms/step - loss: 0.0330 - acc: 0.9645 - iou: 0.7768 - dice_loss: 0.1281 - val_loss: 0.1974 - val_acc: 0.9241 - val_iou: 0.4100 - val_dice_loss: 0.4354
Epoch 87/200
73/73 [==============================] - ETA: 0s - loss: 0.1115 - acc: 0.9403 - iou: 0.5266 - dice_loss: 0.3189
Epoch 87: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.1115 - acc: 0.9403 - iou: 0.5266 - dice_loss: 0.3189 - val_loss: 1.3548 - val_acc: 0.7762 - val_iou: 0.2277 - val_dice_loss: 0.6346
Epoch 88/200
73/73 [==============================] - ETA: 0s - loss: 0.1140 - acc: 0.9380 - iou: 0.5110 - dice_loss: 0.3297
Epoch 88: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.1140 - acc: 0.9380 - iou: 0.5110 - dice_loss: 0.3297 - val_loss: 0.3008 - val_acc: 0.9220 - val_iou: 0.4876 - val_dice_loss: 0.3587
Epoch 89/200
73/73 [==============================] - ETA: 0s - loss: 0.0672 - acc: 0.9533 - iou: 0.6344 - dice_loss: 0.2271
Epoch 89: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0672 - acc: 0.9533 - iou: 0.6344 - dice_loss: 0.2271 - val_loss: 0.1496 - val_acc: 0.9383 - val_iou: 0.5018 - val_dice_loss: 0.3422
Epoch 90/200
73/73 [==============================] - ETA: 0s - loss: 0.0456 - acc: 0.9601 - iou: 0.7117 - dice_loss: 0.1718
Epoch 90: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 235ms/step - loss: 0.0456 - acc: 0.9601 - iou: 0.7117 - dice_loss: 0.1718 - val_loss: 0.1562 - val_acc: 0.9372 - val_iou: 0.4713 - val_dice_loss: 0.3719
Epoch 91/200
73/73 [==============================] - ETA: 0s - loss: 0.0381 - acc: 0.9627 - iou: 0.7412 - dice_loss: 0.1518
Epoch 91: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 237ms/step - loss: 0.0381 - acc: 0.9627 - iou: 0.7412 - dice_loss: 0.1518 - val_loss: 0.1495 - val_acc: 0.9396 - val_iou: 0.5197 - val_dice_loss: 0.3290
Epoch 92/200
73/73 [==============================] - ETA: 0s - loss: 0.0335 - acc: 0.9640 - iou: 0.7710 - dice_loss: 0.1309
Epoch 92: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0335 - acc: 0.9640 - iou: 0.7710 - dice_loss: 0.1309 - val_loss: 0.1501 - val_acc: 0.9380 - val_iou: 0.5192 - val_dice_loss: 0.3302
Epoch 93/200
73/73 [==============================] - ETA: 0s - loss: 0.0321 - acc: 0.9644 - iou: 0.7676 - dice_loss: 0.1349
Epoch 93: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0321 - acc: 0.9644 - iou: 0.7676 - dice_loss: 0.1349 - val_loss: 0.1659 - val_acc: 0.9327 - val_iou: 0.5026 - val_dice_loss: 0.3478
Epoch 94/200
73/73 [==============================] - ETA: 0s - loss: 0.0295 - acc: 0.9649 - iou: 0.7878 - dice_loss: 0.1216
Epoch 94: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0295 - acc: 0.9649 - iou: 0.7878 - dice_loss: 0.1216 - val_loss: 0.1578 - val_acc: 0.9370 - val_iou: 0.5286 - val_dice_loss: 0.3223
Epoch 95/200
73/73 [==============================] - ETA: 0s - loss: 0.0278 - acc: 0.9655 - iou: 0.7949 - dice_loss: 0.1171
Epoch 95: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 239ms/step - loss: 0.0278 - acc: 0.9655 - iou: 0.7949 - dice_loss: 0.1171 - val_loss: 0.1584 - val_acc: 0.9366 - val_iou: 0.5277 - val_dice_loss: 0.3250
Epoch 96/200
73/73 [==============================] - ETA: 0s - loss: 0.0267 - acc: 0.9657 - iou: 0.8038 - dice_loss: 0.1109
Epoch 96: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0267 - acc: 0.9657 - iou: 0.8038 - dice_loss: 0.1109 - val_loss: 0.1536 - val_acc: 0.9380 - val_iou: 0.5481 - val_dice_loss: 0.3052
Epoch 97/200
73/73 [==============================] - ETA: 0s - loss: 0.0257 - acc: 0.9659 - iou: 0.8106 - dice_loss: 0.1062
Epoch 97: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0257 - acc: 0.9659 - iou: 0.8106 - dice_loss: 0.1062 - val_loss: 0.1615 - val_acc: 0.9368 - val_iou: 0.5382 - val_dice_loss: 0.3158
Epoch 98/200
73/73 [==============================] - ETA: 0s - loss: 0.0246 - acc: 0.9661 - iou: 0.8224 - dice_loss: 0.0990
Epoch 98: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 229ms/step - loss: 0.0246 - acc: 0.9661 - iou: 0.8224 - dice_loss: 0.0990 - val_loss: 0.1577 - val_acc: 0.9374 - val_iou: 0.5408 - val_dice_loss: 0.3127
Epoch 99/200
73/73 [==============================] - ETA: 0s - loss: 0.0266 - acc: 0.9656 - iou: 0.8120 - dice_loss: 0.1053
Epoch 99: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 251ms/step - loss: 0.0266 - acc: 0.9656 - iou: 0.8120 - dice_loss: 0.1053 - val_loss: 0.1613 - val_acc: 0.9366 - val_iou: 0.5427 - val_dice_loss: 0.3118
Epoch 100/200
73/73 [==============================] - ETA: 0s - loss: 0.0235 - acc: 0.9663 - iou: 0.8294 - dice_loss: 0.0944
Epoch 100: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0235 - acc: 0.9663 - iou: 0.8294 - dice_loss: 0.0944 - val_loss: 0.1666 - val_acc: 0.9366 - val_iou: 0.5564 - val_dice_loss: 0.2993
Epoch 101/200
73/73 [==============================] - ETA: 0s - loss: 0.0234 - acc: 0.9662 - iou: 0.8276 - dice_loss: 0.0965
Epoch 101: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0234 - acc: 0.9662 - iou: 0.8276 - dice_loss: 0.0965 - val_loss: 0.1680 - val_acc: 0.9328 - val_iou: 0.5421 - val_dice_loss: 0.3129
Epoch 102/200
73/73 [==============================] - ETA: 0s - loss: 0.0218 - acc: 0.9666 - iou: 0.8355 - dice_loss: 0.0909
Epoch 102: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0218 - acc: 0.9666 - iou: 0.8355 - dice_loss: 0.0909 - val_loss: 0.1640 - val_acc: 0.9359 - val_iou: 0.5514 - val_dice_loss: 0.3034
Epoch 103/200
73/73 [==============================] - ETA: 0s - loss: 0.0219 - acc: 0.9667 - iou: 0.8317 - dice_loss: 0.0935
Epoch 103: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0219 - acc: 0.9667 - iou: 0.8317 - dice_loss: 0.0935 - val_loss: 0.1582 - val_acc: 0.9380 - val_iou: 0.5710 - val_dice_loss: 0.2872
Epoch 104/200
73/73 [==============================] - ETA: 0s - loss: 0.0212 - acc: 0.9667 - iou: 0.8448 - dice_loss: 0.0852
Epoch 104: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 237ms/step - loss: 0.0212 - acc: 0.9667 - iou: 0.8448 - dice_loss: 0.0852 - val_loss: 0.1640 - val_acc: 0.9371 - val_iou: 0.5672 - val_dice_loss: 0.2923
Epoch 105/200
73/73 [==============================] - ETA: 0s - loss: 0.0205 - acc: 0.9669 - iou: 0.8388 - dice_loss: 0.0896
Epoch 105: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0205 - acc: 0.9669 - iou: 0.8388 - dice_loss: 0.0896 - val_loss: 0.1718 - val_acc: 0.9347 - val_iou: 0.5704 - val_dice_loss: 0.2898
Epoch 106/200
73/73 [==============================] - ETA: 0s - loss: 0.0199 - acc: 0.9669 - iou: 0.8503 - dice_loss: 0.0823
Epoch 106: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0199 - acc: 0.9669 - iou: 0.8503 - dice_loss: 0.0823 - val_loss: 0.1775 - val_acc: 0.9354 - val_iou: 0.5659 - val_dice_loss: 0.2930
Epoch 107/200
73/73 [==============================] - ETA: 0s - loss: 0.0193 - acc: 0.9672 - iou: 0.8571 - dice_loss: 0.0779
Epoch 107: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 229ms/step - loss: 0.0193 - acc: 0.9672 - iou: 0.8571 - dice_loss: 0.0779 - val_loss: 0.1757 - val_acc: 0.9348 - val_iou: 0.5645 - val_dice_loss: 0.2957
Epoch 108/200
73/73 [==============================] - ETA: 0s - loss: 0.0183 - acc: 0.9673 - iou: 0.8580 - dice_loss: 0.0781
Epoch 108: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 242ms/step - loss: 0.0183 - acc: 0.9673 - iou: 0.8580 - dice_loss: 0.0781 - val_loss: 0.1705 - val_acc: 0.9386 - val_iou: 0.5765 - val_dice_loss: 0.2838
Epoch 109/200
73/73 [==============================] - ETA: 0s - loss: 0.0176 - acc: 0.9675 - iou: 0.8673 - dice_loss: 0.0719
Epoch 109: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 235ms/step - loss: 0.0176 - acc: 0.9675 - iou: 0.8673 - dice_loss: 0.0719 - val_loss: 0.1730 - val_acc: 0.9364 - val_iou: 0.5699 - val_dice_loss: 0.2890
Epoch 110/200
73/73 [==============================] - ETA: 0s - loss: 0.0250 - acc: 0.9653 - iou: 0.8226 - dice_loss: 0.1001
Epoch 110: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0250 - acc: 0.9653 - iou: 0.8226 - dice_loss: 0.1001 - val_loss: 0.1658 - val_acc: 0.9359 - val_iou: 0.5778 - val_dice_loss: 0.2826
Epoch 111/200
73/73 [==============================] - ETA: 0s - loss: 0.0231 - acc: 0.9659 - iou: 0.8394 - dice_loss: 0.0888
Epoch 111: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0231 - acc: 0.9659 - iou: 0.8394 - dice_loss: 0.0888 - val_loss: 0.1933 - val_acc: 0.9300 - val_iou: 0.5387 - val_dice_loss: 0.3152
Epoch 112/200
73/73 [==============================] - ETA: 0s - loss: 0.0310 - acc: 0.9633 - iou: 0.8068 - dice_loss: 0.1091
Epoch 112: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0310 - acc: 0.9633 - iou: 0.8068 - dice_loss: 0.1091 - val_loss: 0.2296 - val_acc: 0.9238 - val_iou: 0.5241 - val_dice_loss: 0.3309
Epoch 113/200
73/73 [==============================] - ETA: 0s - loss: 0.0275 - acc: 0.9644 - iou: 0.8221 - dice_loss: 0.0987
Epoch 113: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 239ms/step - loss: 0.0275 - acc: 0.9644 - iou: 0.8221 - dice_loss: 0.0987 - val_loss: 0.1718 - val_acc: 0.9376 - val_iou: 0.5569 - val_dice_loss: 0.2986
Epoch 114/200
73/73 [==============================] - ETA: 0s - loss: 0.0216 - acc: 0.9661 - iou: 0.8407 - dice_loss: 0.0882
Epoch 114: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0216 - acc: 0.9661 - iou: 0.8407 - dice_loss: 0.0882 - val_loss: 0.1677 - val_acc: 0.9376 - val_iou: 0.5725 - val_dice_loss: 0.2878
Epoch 115/200
73/73 [==============================] - ETA: 0s - loss: 0.0192 - acc: 0.9668 - iou: 0.8641 - dice_loss: 0.0737
Epoch 115: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0192 - acc: 0.9668 - iou: 0.8641 - dice_loss: 0.0737 - val_loss: 0.1941 - val_acc: 0.9336 - val_iou: 0.5666 - val_dice_loss: 0.2942
Epoch 116/200
73/73 [==============================] - ETA: 0s - loss: 0.0183 - acc: 0.9670 - iou: 0.8663 - dice_loss: 0.0725
Epoch 116: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0183 - acc: 0.9670 - iou: 0.8663 - dice_loss: 0.0725 - val_loss: 0.1846 - val_acc: 0.9355 - val_iou: 0.5670 - val_dice_loss: 0.2928
Epoch 117/200
73/73 [==============================] - ETA: 0s - loss: 0.0177 - acc: 0.9671 - iou: 0.8670 - dice_loss: 0.0726
Epoch 117: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 241ms/step - loss: 0.0177 - acc: 0.9671 - iou: 0.8670 - dice_loss: 0.0726 - val_loss: 0.1871 - val_acc: 0.9348 - val_iou: 0.5691 - val_dice_loss: 0.2916
Epoch 118/200
73/73 [==============================] - ETA: 0s - loss: 0.0163 - acc: 0.9676 - iou: 0.8755 - dice_loss: 0.0675
Epoch 118: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0163 - acc: 0.9676 - iou: 0.8755 - dice_loss: 0.0675 - val_loss: 0.1942 - val_acc: 0.9355 - val_iou: 0.5727 - val_dice_loss: 0.2876
Epoch 119/200
73/73 [==============================] - ETA: 0s - loss: 0.0224 - acc: 0.9656 - iou: 0.8588 - dice_loss: 0.0779
Epoch 119: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0224 - acc: 0.9656 - iou: 0.8588 - dice_loss: 0.0779 - val_loss: 0.5501 - val_acc: 0.9024 - val_iou: 0.4049 - val_dice_loss: 0.4418
Epoch 120/200
73/73 [==============================] - ETA: 0s - loss: 0.0886 - acc: 0.9463 - iou: 0.6103 - dice_loss: 0.2470
Epoch 120: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0886 - acc: 0.9463 - iou: 0.6103 - dice_loss: 0.2470 - val_loss: 0.5564 - val_acc: 0.8924 - val_iou: 0.4539 - val_dice_loss: 0.3946
Epoch 121/200
73/73 [==============================] - ETA: 0s - loss: 0.0529 - acc: 0.9558 - iou: 0.7127 - dice_loss: 0.1724
Epoch 121: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 238ms/step - loss: 0.0529 - acc: 0.9558 - iou: 0.7127 - dice_loss: 0.1724 - val_loss: 0.1801 - val_acc: 0.9286 - val_iou: 0.5373 - val_dice_loss: 0.3224
Epoch 122/200
73/73 [==============================] - ETA: 0s - loss: 0.0282 - acc: 0.9640 - iou: 0.8077 - dice_loss: 0.1081
Epoch 122: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 235ms/step - loss: 0.0282 - acc: 0.9640 - iou: 0.8077 - dice_loss: 0.1081 - val_loss: 0.1608 - val_acc: 0.9386 - val_iou: 0.5548 - val_dice_loss: 0.3029
Epoch 123/200
73/73 [==============================] - ETA: 0s - loss: 0.0265 - acc: 0.9645 - iou: 0.8226 - dice_loss: 0.0991
Epoch 123: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0265 - acc: 0.9645 - iou: 0.8226 - dice_loss: 0.0991 - val_loss: 0.1590 - val_acc: 0.9382 - val_iou: 0.5533 - val_dice_loss: 0.3028
Epoch 124/200
73/73 [==============================] - ETA: 0s - loss: 0.0212 - acc: 0.9658 - iou: 0.8473 - dice_loss: 0.0839
Epoch 124: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0212 - acc: 0.9658 - iou: 0.8473 - dice_loss: 0.0839 - val_loss: 0.1750 - val_acc: 0.9376 - val_iou: 0.5814 - val_dice_loss: 0.2812
Epoch 125/200
73/73 [==============================] - ETA: 0s - loss: 0.0199 - acc: 0.9663 - iou: 0.8606 - dice_loss: 0.0760
Epoch 125: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 251ms/step - loss: 0.0199 - acc: 0.9663 - iou: 0.8606 - dice_loss: 0.0760 - val_loss: 0.1432 - val_acc: 0.9421 - val_iou: 0.5921 - val_dice_loss: 0.2662
Epoch 126/200
73/73 [==============================] - ETA: 0s - loss: 0.0222 - acc: 0.9656 - iou: 0.8517 - dice_loss: 0.0812
Epoch 126: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 229ms/step - loss: 0.0222 - acc: 0.9656 - iou: 0.8517 - dice_loss: 0.0812 - val_loss: 0.1634 - val_acc: 0.9402 - val_iou: 0.5653 - val_dice_loss: 0.2930
Epoch 127/200
73/73 [==============================] - ETA: 0s - loss: 0.0210 - acc: 0.9657 - iou: 0.8540 - dice_loss: 0.0797
Epoch 127: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 226ms/step - loss: 0.0210 - acc: 0.9657 - iou: 0.8540 - dice_loss: 0.0797 - val_loss: 0.1646 - val_acc: 0.9385 - val_iou: 0.5895 - val_dice_loss: 0.2726
Epoch 128/200
73/73 [==============================] - ETA: 0s - loss: 0.0169 - acc: 0.9669 - iou: 0.8781 - dice_loss: 0.0656
Epoch 128: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0169 - acc: 0.9669 - iou: 0.8781 - dice_loss: 0.0656 - val_loss: 0.1620 - val_acc: 0.9408 - val_iou: 0.6003 - val_dice_loss: 0.2638
Epoch 129/200
73/73 [==============================] - ETA: 0s - loss: 0.0163 - acc: 0.9671 - iou: 0.8806 - dice_loss: 0.0643
Epoch 129: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0163 - acc: 0.9671 - iou: 0.8806 - dice_loss: 0.0643 - val_loss: 0.1578 - val_acc: 0.9408 - val_iou: 0.6102 - val_dice_loss: 0.2559
Epoch 130/200
73/73 [==============================] - ETA: 0s - loss: 0.0160 - acc: 0.9672 - iou: 0.8808 - dice_loss: 0.0642
Epoch 130: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 236ms/step - loss: 0.0160 - acc: 0.9672 - iou: 0.8808 - dice_loss: 0.0642 - val_loss: 0.1895 - val_acc: 0.9342 - val_iou: 0.5668 - val_dice_loss: 0.2930
Epoch 131/200
73/73 [==============================] - ETA: 0s - loss: 0.0170 - acc: 0.9668 - iou: 0.8790 - dice_loss: 0.0652
Epoch 131: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0170 - acc: 0.9668 - iou: 0.8790 - dice_loss: 0.0652 - val_loss: 0.1733 - val_acc: 0.9352 - val_iou: 0.5939 - val_dice_loss: 0.2706
Epoch 132/200
73/73 [==============================] - ETA: 0s - loss: 0.0171 - acc: 0.9668 - iou: 0.8795 - dice_loss: 0.0648
Epoch 132: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0171 - acc: 0.9668 - iou: 0.8795 - dice_loss: 0.0648 - val_loss: 0.1813 - val_acc: 0.9369 - val_iou: 0.5886 - val_dice_loss: 0.2753
Epoch 133/200
73/73 [==============================] - ETA: 0s - loss: 0.0161 - acc: 0.9672 - iou: 0.8788 - dice_loss: 0.0656
Epoch 133: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0161 - acc: 0.9672 - iou: 0.8788 - dice_loss: 0.0656 - val_loss: 0.1611 - val_acc: 0.9397 - val_iou: 0.5935 - val_dice_loss: 0.2701
Epoch 134/200
73/73 [==============================] - ETA: 0s - loss: 0.0142 - acc: 0.9676 - iou: 0.8898 - dice_loss: 0.0593
Epoch 134: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 242ms/step - loss: 0.0142 - acc: 0.9676 - iou: 0.8898 - dice_loss: 0.0593 - val_loss: 0.1696 - val_acc: 0.9393 - val_iou: 0.5962 - val_dice_loss: 0.2689
Epoch 135/200
73/73 [==============================] - ETA: 0s - loss: 0.0135 - acc: 0.9677 - iou: 0.9015 - dice_loss: 0.0524
Epoch 135: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0135 - acc: 0.9677 - iou: 0.9015 - dice_loss: 0.0524 - val_loss: 0.1661 - val_acc: 0.9382 - val_iou: 0.5978 - val_dice_loss: 0.2672
Epoch 136/200
73/73 [==============================] - ETA: 0s - loss: 0.0132 - acc: 0.9679 - iou: 0.9032 - dice_loss: 0.0513
Epoch 136: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0132 - acc: 0.9679 - iou: 0.9032 - dice_loss: 0.0513 - val_loss: 0.1739 - val_acc: 0.9382 - val_iou: 0.5984 - val_dice_loss: 0.2668
Epoch 137/200
73/73 [==============================] - ETA: 0s - loss: 0.0130 - acc: 0.9680 - iou: 0.8966 - dice_loss: 0.0562
Epoch 137: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0130 - acc: 0.9680 - iou: 0.8966 - dice_loss: 0.0562 - val_loss: 0.1781 - val_acc: 0.9382 - val_iou: 0.6004 - val_dice_loss: 0.2642
Epoch 138/200
73/73 [==============================] - ETA: 0s - loss: 0.0132 - acc: 0.9680 - iou: 0.9007 - dice_loss: 0.0528
Epoch 138: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 251ms/step - loss: 0.0132 - acc: 0.9680 - iou: 0.9007 - dice_loss: 0.0528 - val_loss: 0.1681 - val_acc: 0.9391 - val_iou: 0.5994 - val_dice_loss: 0.2651
Epoch 139/200
73/73 [==============================] - ETA: 0s - loss: 0.0127 - acc: 0.9679 - iou: 0.9082 - dice_loss: 0.0486
Epoch 139: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 229ms/step - loss: 0.0127 - acc: 0.9679 - iou: 0.9082 - dice_loss: 0.0486 - val_loss: 0.1806 - val_acc: 0.9383 - val_iou: 0.6031 - val_dice_loss: 0.2627
Epoch 140/200
73/73 [==============================] - ETA: 0s - loss: 0.0123 - acc: 0.9681 - iou: 0.9090 - dice_loss: 0.0481
Epoch 140: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0123 - acc: 0.9681 - iou: 0.9090 - dice_loss: 0.0481 - val_loss: 0.1833 - val_acc: 0.9377 - val_iou: 0.5944 - val_dice_loss: 0.2704
Epoch 141/200
73/73 [==============================] - ETA: 0s - loss: 0.0122 - acc: 0.9682 - iou: 0.9068 - dice_loss: 0.0495
Epoch 141: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 226ms/step - loss: 0.0122 - acc: 0.9682 - iou: 0.9068 - dice_loss: 0.0495 - val_loss: 0.1818 - val_acc: 0.9378 - val_iou: 0.5863 - val_dice_loss: 0.2763
Epoch 142/200
73/73 [==============================] - ETA: 0s - loss: 0.0119 - acc: 0.9682 - iou: 0.9111 - dice_loss: 0.0470
Epoch 142: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0119 - acc: 0.9682 - iou: 0.9111 - dice_loss: 0.0470 - val_loss: 0.1763 - val_acc: 0.9399 - val_iou: 0.6014 - val_dice_loss: 0.2628
Epoch 143/200
73/73 [==============================] - ETA: 0s - loss: 0.0113 - acc: 0.9684 - iou: 0.9161 - dice_loss: 0.0442
Epoch 143: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 239ms/step - loss: 0.0113 - acc: 0.9684 - iou: 0.9161 - dice_loss: 0.0442 - val_loss: 0.1825 - val_acc: 0.9383 - val_iou: 0.6032 - val_dice_loss: 0.2620
Epoch 144/200
73/73 [==============================] - ETA: 0s - loss: 0.0113 - acc: 0.9683 - iou: 0.9150 - dice_loss: 0.0450
Epoch 144: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0113 - acc: 0.9683 - iou: 0.9150 - dice_loss: 0.0450 - val_loss: 0.1860 - val_acc: 0.9381 - val_iou: 0.6049 - val_dice_loss: 0.2607
Epoch 145/200
73/73 [==============================] - ETA: 0s - loss: 0.0115 - acc: 0.9683 - iou: 0.9139 - dice_loss: 0.0456
Epoch 145: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0115 - acc: 0.9683 - iou: 0.9139 - dice_loss: 0.0456 - val_loss: 0.1833 - val_acc: 0.9392 - val_iou: 0.5966 - val_dice_loss: 0.2691
Epoch 146/200
73/73 [==============================] - ETA: 0s - loss: 0.0111 - acc: 0.9684 - iou: 0.9214 - dice_loss: 0.0412
Epoch 146: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0111 - acc: 0.9684 - iou: 0.9214 - dice_loss: 0.0412 - val_loss: 0.1876 - val_acc: 0.9386 - val_iou: 0.6024 - val_dice_loss: 0.2640
Epoch 147/200
73/73 [==============================] - ETA: 0s - loss: 0.0114 - acc: 0.9682 - iou: 0.9161 - dice_loss: 0.0446
Epoch 147: val_loss did not improve from 0.12909
73/73 [==============================] - 19s 257ms/step - loss: 0.0114 - acc: 0.9682 - iou: 0.9161 - dice_loss: 0.0446 - val_loss: 0.1875 - val_acc: 0.9390 - val_iou: 0.6057 - val_dice_loss: 0.2589
Epoch 148/200
73/73 [==============================] - ETA: 0s - loss: 0.0116 - acc: 0.9682 - iou: 0.9188 - dice_loss: 0.0426
Epoch 148: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0116 - acc: 0.9682 - iou: 0.9188 - dice_loss: 0.0426 - val_loss: 0.1927 - val_acc: 0.9363 - val_iou: 0.6018 - val_dice_loss: 0.2634
Epoch 149/200
73/73 [==============================] - ETA: 0s - loss: 0.0106 - acc: 0.9685 - iou: 0.9219 - dice_loss: 0.0411
Epoch 149: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0106 - acc: 0.9685 - iou: 0.9219 - dice_loss: 0.0411 - val_loss: 0.1901 - val_acc: 0.9384 - val_iou: 0.6002 - val_dice_loss: 0.2653
Epoch 150/200
73/73 [==============================] - ETA: 0s - loss: 0.0112 - acc: 0.9684 - iou: 0.9218 - dice_loss: 0.0410
Epoch 150: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0112 - acc: 0.9684 - iou: 0.9218 - dice_loss: 0.0410 - val_loss: 0.1818 - val_acc: 0.9390 - val_iou: 0.6158 - val_dice_loss: 0.2510
Epoch 151/200
73/73 [==============================] - ETA: 0s - loss: 0.0107 - acc: 0.9684 - iou: 0.9218 - dice_loss: 0.0412
Epoch 151: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 242ms/step - loss: 0.0107 - acc: 0.9684 - iou: 0.9218 - dice_loss: 0.0412 - val_loss: 0.1923 - val_acc: 0.9378 - val_iou: 0.5937 - val_dice_loss: 0.2701
Epoch 152/200
73/73 [==============================] - ETA: 0s - loss: 0.0101 - acc: 0.9686 - iou: 0.9278 - dice_loss: 0.0377
Epoch 152: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0101 - acc: 0.9686 - iou: 0.9278 - dice_loss: 0.0377 - val_loss: 0.1917 - val_acc: 0.9386 - val_iou: 0.6025 - val_dice_loss: 0.2636
Epoch 153/200
73/73 [==============================] - ETA: 0s - loss: 0.0101 - acc: 0.9685 - iou: 0.9266 - dice_loss: 0.0384
Epoch 153: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0101 - acc: 0.9685 - iou: 0.9266 - dice_loss: 0.0384 - val_loss: 0.1858 - val_acc: 0.9396 - val_iou: 0.6090 - val_dice_loss: 0.2559
Epoch 154/200
73/73 [==============================] - ETA: 0s - loss: 0.0105 - acc: 0.9684 - iou: 0.9228 - dice_loss: 0.0407
Epoch 154: val_loss did not improve from 0.12909
73/73 [==============================] - 19s 256ms/step - loss: 0.0105 - acc: 0.9684 - iou: 0.9228 - dice_loss: 0.0407 - val_loss: 0.1843 - val_acc: 0.9385 - val_iou: 0.5997 - val_dice_loss: 0.2654
Epoch 155/200
73/73 [==============================] - ETA: 0s - loss: 0.0099 - acc: 0.9686 - iou: 0.9258 - dice_loss: 0.0392
Epoch 155: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0099 - acc: 0.9686 - iou: 0.9258 - dice_loss: 0.0392 - val_loss: 0.1846 - val_acc: 0.9397 - val_iou: 0.6121 - val_dice_loss: 0.2547
Epoch 156/200
73/73 [==============================] - ETA: 0s - loss: 0.0092 - acc: 0.9688 - iou: 0.9324 - dice_loss: 0.0353
Epoch 156: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0092 - acc: 0.9688 - iou: 0.9324 - dice_loss: 0.0353 - val_loss: 0.1921 - val_acc: 0.9394 - val_iou: 0.6094 - val_dice_loss: 0.2568
Epoch 157/200
73/73 [==============================] - ETA: 0s - loss: 0.0092 - acc: 0.9688 - iou: 0.9297 - dice_loss: 0.0375
Epoch 157: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 254ms/step - loss: 0.0092 - acc: 0.9688 - iou: 0.9297 - dice_loss: 0.0375 - val_loss: 0.1909 - val_acc: 0.9384 - val_iou: 0.6148 - val_dice_loss: 0.2513
Epoch 158/200
73/73 [==============================] - ETA: 0s - loss: 0.0127 - acc: 0.9677 - iou: 0.9151 - dice_loss: 0.0448
Epoch 158: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0127 - acc: 0.9677 - iou: 0.9151 - dice_loss: 0.0448 - val_loss: 0.1999 - val_acc: 0.9358 - val_iou: 0.5951 - val_dice_loss: 0.2683
Epoch 159/200
73/73 [==============================] - ETA: 0s - loss: 0.0806 - acc: 0.9479 - iou: 0.6650 - dice_loss: 0.2131
Epoch 159: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 226ms/step - loss: 0.0806 - acc: 0.9479 - iou: 0.6650 - dice_loss: 0.2131 - val_loss: 3.0546 - val_acc: 0.5592 - val_iou: 0.1510 - val_dice_loss: 0.7422
Epoch 160/200
73/73 [==============================] - ETA: 0s - loss: 0.0915 - acc: 0.9440 - iou: 0.5938 - dice_loss: 0.2639
Epoch 160: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 254ms/step - loss: 0.0915 - acc: 0.9440 - iou: 0.5938 - dice_loss: 0.2639 - val_loss: 0.2128 - val_acc: 0.9201 - val_iou: 0.4810 - val_dice_loss: 0.3628
Epoch 161/200
73/73 [==============================] - ETA: 0s - loss: 0.0497 - acc: 0.9568 - iou: 0.7378 - dice_loss: 0.1527
Epoch 161: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0497 - acc: 0.9568 - iou: 0.7378 - dice_loss: 0.1527 - val_loss: 0.1614 - val_acc: 0.9321 - val_iou: 0.5273 - val_dice_loss: 0.3276
Epoch 162/200
73/73 [==============================] - ETA: 0s - loss: 0.0321 - acc: 0.9621 - iou: 0.8103 - dice_loss: 0.1063
Epoch 162: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0321 - acc: 0.9621 - iou: 0.8103 - dice_loss: 0.1063 - val_loss: 0.1775 - val_acc: 0.9357 - val_iou: 0.5458 - val_dice_loss: 0.3118
Epoch 163/200
73/73 [==============================] - ETA: 0s - loss: 0.0243 - acc: 0.9645 - iou: 0.8497 - dice_loss: 0.0829
Epoch 163: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 238ms/step - loss: 0.0243 - acc: 0.9645 - iou: 0.8497 - dice_loss: 0.0829 - val_loss: 0.1883 - val_acc: 0.9346 - val_iou: 0.5520 - val_dice_loss: 0.3041
Epoch 164/200
73/73 [==============================] - ETA: 0s - loss: 0.0257 - acc: 0.9640 - iou: 0.8412 - dice_loss: 0.0872
Epoch 164: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0257 - acc: 0.9640 - iou: 0.8412 - dice_loss: 0.0872 - val_loss: 0.1700 - val_acc: 0.9353 - val_iou: 0.5829 - val_dice_loss: 0.2788
Epoch 165/200
73/73 [==============================] - ETA: 0s - loss: 0.0169 - acc: 0.9665 - iou: 0.8851 - dice_loss: 0.0615
Epoch 165: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0169 - acc: 0.9665 - iou: 0.8851 - dice_loss: 0.0615 - val_loss: 0.1792 - val_acc: 0.9373 - val_iou: 0.5813 - val_dice_loss: 0.2803
Epoch 166/200
73/73 [==============================] - ETA: 0s - loss: 0.0144 - acc: 0.9673 - iou: 0.9007 - dice_loss: 0.0527
Epoch 166: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0144 - acc: 0.9673 - iou: 0.9007 - dice_loss: 0.0527 - val_loss: 0.1783 - val_acc: 0.9363 - val_iou: 0.5872 - val_dice_loss: 0.2762
Epoch 167/200
73/73 [==============================] - ETA: 0s - loss: 0.0134 - acc: 0.9675 - iou: 0.9066 - dice_loss: 0.0494
Epoch 167: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0134 - acc: 0.9675 - iou: 0.9066 - dice_loss: 0.0494 - val_loss: 0.1942 - val_acc: 0.9358 - val_iou: 0.5847 - val_dice_loss: 0.2773
Epoch 168/200
73/73 [==============================] - ETA: 0s - loss: 0.0132 - acc: 0.9675 - iou: 0.9079 - dice_loss: 0.0487
Epoch 168: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 226ms/step - loss: 0.0132 - acc: 0.9675 - iou: 0.9079 - dice_loss: 0.0487 - val_loss: 0.1983 - val_acc: 0.9354 - val_iou: 0.5814 - val_dice_loss: 0.2792
Epoch 169/200
73/73 [==============================] - ETA: 0s - loss: 0.0127 - acc: 0.9677 - iou: 0.9130 - dice_loss: 0.0462
Epoch 169: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 226ms/step - loss: 0.0127 - acc: 0.9677 - iou: 0.9130 - dice_loss: 0.0462 - val_loss: 0.2039 - val_acc: 0.9351 - val_iou: 0.5832 - val_dice_loss: 0.2789
Epoch 170/200
73/73 [==============================] - ETA: 0s - loss: 0.0124 - acc: 0.9677 - iou: 0.9129 - dice_loss: 0.0460
Epoch 170: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 237ms/step - loss: 0.0124 - acc: 0.9677 - iou: 0.9129 - dice_loss: 0.0460 - val_loss: 0.2084 - val_acc: 0.9350 - val_iou: 0.5939 - val_dice_loss: 0.2706
Epoch 171/200
73/73 [==============================] - ETA: 0s - loss: 0.0126 - acc: 0.9677 - iou: 0.9085 - dice_loss: 0.0486
Epoch 171: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0126 - acc: 0.9677 - iou: 0.9085 - dice_loss: 0.0486 - val_loss: 0.2053 - val_acc: 0.9349 - val_iou: 0.5860 - val_dice_loss: 0.2763
Epoch 172/200
73/73 [==============================] - ETA: 0s - loss: 0.0118 - acc: 0.9679 - iou: 0.9204 - dice_loss: 0.0418
Epoch 172: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 234ms/step - loss: 0.0118 - acc: 0.9679 - iou: 0.9204 - dice_loss: 0.0418 - val_loss: 0.2056 - val_acc: 0.9349 - val_iou: 0.5814 - val_dice_loss: 0.2810
Epoch 173/200
73/73 [==============================] - ETA: 0s - loss: 0.0112 - acc: 0.9680 - iou: 0.9228 - dice_loss: 0.0404
Epoch 173: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 237ms/step - loss: 0.0112 - acc: 0.9680 - iou: 0.9228 - dice_loss: 0.0404 - val_loss: 0.2102 - val_acc: 0.9336 - val_iou: 0.5794 - val_dice_loss: 0.2824
Epoch 174/200
73/73 [==============================] - ETA: 0s - loss: 0.0116 - acc: 0.9680 - iou: 0.9218 - dice_loss: 0.0410
Epoch 174: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 236ms/step - loss: 0.0116 - acc: 0.9680 - iou: 0.9218 - dice_loss: 0.0410 - val_loss: 0.2133 - val_acc: 0.9341 - val_iou: 0.5726 - val_dice_loss: 0.2888
Epoch 175/200
73/73 [==============================] - ETA: 0s - loss: 0.0118 - acc: 0.9679 - iou: 0.9190 - dice_loss: 0.0426
Epoch 175: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0118 - acc: 0.9679 - iou: 0.9190 - dice_loss: 0.0426 - val_loss: 0.1945 - val_acc: 0.9365 - val_iou: 0.5812 - val_dice_loss: 0.2809
Epoch 176/200
73/73 [==============================] - ETA: 0s - loss: 0.0162 - acc: 0.9665 - iou: 0.9035 - dice_loss: 0.0515
Epoch 176: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0162 - acc: 0.9665 - iou: 0.9035 - dice_loss: 0.0515 - val_loss: 0.2821 - val_acc: 0.9257 - val_iou: 0.5531 - val_dice_loss: 0.3002
Epoch 177/200
73/73 [==============================] - ETA: 0s - loss: 0.0272 - acc: 0.9633 - iou: 0.8389 - dice_loss: 0.0886
Epoch 177: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 240ms/step - loss: 0.0272 - acc: 0.9633 - iou: 0.8389 - dice_loss: 0.0886 - val_loss: 0.2365 - val_acc: 0.9232 - val_iou: 0.5660 - val_dice_loss: 0.2955
Epoch 178/200
73/73 [==============================] - ETA: 0s - loss: 0.0158 - acc: 0.9668 - iou: 0.8965 - dice_loss: 0.0549
Epoch 178: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 232ms/step - loss: 0.0158 - acc: 0.9668 - iou: 0.8965 - dice_loss: 0.0549 - val_loss: 0.2011 - val_acc: 0.9329 - val_iou: 0.5842 - val_dice_loss: 0.2800
Epoch 179/200
73/73 [==============================] - ETA: 0s - loss: 0.0129 - acc: 0.9676 - iou: 0.9129 - dice_loss: 0.0458
Epoch 179: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0129 - acc: 0.9676 - iou: 0.9129 - dice_loss: 0.0458 - val_loss: 0.1990 - val_acc: 0.9369 - val_iou: 0.5904 - val_dice_loss: 0.2750
Epoch 180/200
73/73 [==============================] - ETA: 0s - loss: 0.0111 - acc: 0.9681 - iou: 0.9241 - dice_loss: 0.0397
Epoch 180: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 237ms/step - loss: 0.0111 - acc: 0.9681 - iou: 0.9241 - dice_loss: 0.0397 - val_loss: 0.2064 - val_acc: 0.9351 - val_iou: 0.5912 - val_dice_loss: 0.2740
Epoch 181/200
73/73 [==============================] - ETA: 0s - loss: 0.0129 - acc: 0.9676 - iou: 0.9207 - dice_loss: 0.0415
Epoch 181: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 225ms/step - loss: 0.0129 - acc: 0.9676 - iou: 0.9207 - dice_loss: 0.0415 - val_loss: 0.2128 - val_acc: 0.9339 - val_iou: 0.5860 - val_dice_loss: 0.2803
Epoch 182/200
73/73 [==============================] - ETA: 0s - loss: 0.0117 - acc: 0.9679 - iou: 0.9197 - dice_loss: 0.0421
Epoch 182: val_loss did not improve from 0.12909
73/73 [==============================] - 16s 224ms/step - loss: 0.0117 - acc: 0.9679 - iou: 0.9197 - dice_loss: 0.0421 - val_loss: 0.1982 - val_acc: 0.9375 - val_iou: 0.5941 - val_dice_loss: 0.2717
Epoch 183/200
73/73 [==============================] - ETA: 0s - loss: 0.0111 - acc: 0.9680 - iou: 0.9252 - dice_loss: 0.0392
Epoch 183: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 248ms/step - loss: 0.0111 - acc: 0.9680 - iou: 0.9252 - dice_loss: 0.0392 - val_loss: 0.2010 - val_acc: 0.9369 - val_iou: 0.6008 - val_dice_loss: 0.2666
Epoch 184/200
73/73 [==============================] - ETA: 0s - loss: 0.0104 - acc: 0.9682 - iou: 0.9305 - dice_loss: 0.0363
Epoch 184: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0104 - acc: 0.9682 - iou: 0.9305 - dice_loss: 0.0363 - val_loss: 0.2076 - val_acc: 0.9353 - val_iou: 0.5996 - val_dice_loss: 0.2687
Epoch 185/200
73/73 [==============================] - ETA: 0s - loss: 0.0099 - acc: 0.9685 - iou: 0.9309 - dice_loss: 0.0361
Epoch 185: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0099 - acc: 0.9685 - iou: 0.9309 - dice_loss: 0.0361 - val_loss: 0.2061 - val_acc: 0.9363 - val_iou: 0.5985 - val_dice_loss: 0.2679
Epoch 186/200
73/73 [==============================] - ETA: 0s - loss: 0.0098 - acc: 0.9685 - iou: 0.9298 - dice_loss: 0.0367
Epoch 186: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 250ms/step - loss: 0.0098 - acc: 0.9685 - iou: 0.9298 - dice_loss: 0.0367 - val_loss: 0.2089 - val_acc: 0.9362 - val_iou: 0.5954 - val_dice_loss: 0.2709
Epoch 187/200
73/73 [==============================] - ETA: 0s - loss: 0.0094 - acc: 0.9686 - iou: 0.9338 - dice_loss: 0.0345
Epoch 187: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0094 - acc: 0.9686 - iou: 0.9338 - dice_loss: 0.0345 - val_loss: 0.2072 - val_acc: 0.9353 - val_iou: 0.5963 - val_dice_loss: 0.2699
Epoch 188/200
73/73 [==============================] - ETA: 0s - loss: 0.0090 - acc: 0.9687 - iou: 0.9388 - dice_loss: 0.0318
Epoch 188: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0090 - acc: 0.9687 - iou: 0.9388 - dice_loss: 0.0318 - val_loss: 0.2084 - val_acc: 0.9373 - val_iou: 0.6008 - val_dice_loss: 0.2659
Epoch 189/200
73/73 [==============================] - ETA: 0s - loss: 0.0089 - acc: 0.9686 - iou: 0.9398 - dice_loss: 0.0312
Epoch 189: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 236ms/step - loss: 0.0089 - acc: 0.9686 - iou: 0.9398 - dice_loss: 0.0312 - val_loss: 0.2102 - val_acc: 0.9366 - val_iou: 0.5944 - val_dice_loss: 0.2706
Epoch 190/200
73/73 [==============================] - ETA: 0s - loss: 0.0085 - acc: 0.9688 - iou: 0.9415 - dice_loss: 0.0304
Epoch 190: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 236ms/step - loss: 0.0085 - acc: 0.9688 - iou: 0.9415 - dice_loss: 0.0304 - val_loss: 0.2130 - val_acc: 0.9369 - val_iou: 0.5992 - val_dice_loss: 0.2672
Epoch 191/200
73/73 [==============================] - ETA: 0s - loss: 0.0091 - acc: 0.9686 - iou: 0.9393 - dice_loss: 0.0315
Epoch 191: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 230ms/step - loss: 0.0091 - acc: 0.9686 - iou: 0.9393 - dice_loss: 0.0315 - val_loss: 0.2158 - val_acc: 0.9373 - val_iou: 0.5976 - val_dice_loss: 0.2673
Epoch 192/200
73/73 [==============================] - ETA: 0s - loss: 0.0083 - acc: 0.9689 - iou: 0.9438 - dice_loss: 0.0291
Epoch 192: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 229ms/step - loss: 0.0083 - acc: 0.9689 - iou: 0.9438 - dice_loss: 0.0291 - val_loss: 0.2208 - val_acc: 0.9363 - val_iou: 0.5974 - val_dice_loss: 0.2679
Epoch 193/200
73/73 [==============================] - ETA: 0s - loss: 0.0087 - acc: 0.9687 - iou: 0.9426 - dice_loss: 0.0297
Epoch 193: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 238ms/step - loss: 0.0087 - acc: 0.9687 - iou: 0.9426 - dice_loss: 0.0297 - val_loss: 0.2145 - val_acc: 0.9348 - val_iou: 0.5982 - val_dice_loss: 0.2675
Epoch 194/200
73/73 [==============================] - ETA: 0s - loss: 0.0089 - acc: 0.9687 - iou: 0.9400 - dice_loss: 0.0311
Epoch 194: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 229ms/step - loss: 0.0089 - acc: 0.9687 - iou: 0.9400 - dice_loss: 0.0311 - val_loss: 0.2166 - val_acc: 0.9363 - val_iou: 0.5994 - val_dice_loss: 0.2674
Epoch 195/200
73/73 [==============================] - ETA: 0s - loss: 0.0080 - acc: 0.9689 - iou: 0.9485 - dice_loss: 0.0265
Epoch 195: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 227ms/step - loss: 0.0080 - acc: 0.9689 - iou: 0.9485 - dice_loss: 0.0265 - val_loss: 0.2158 - val_acc: 0.9375 - val_iou: 0.5971 - val_dice_loss: 0.2669
Epoch 196/200
73/73 [==============================] - ETA: 0s - loss: 0.0077 - acc: 0.9691 - iou: 0.9471 - dice_loss: 0.0273
Epoch 196: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 236ms/step - loss: 0.0077 - acc: 0.9691 - iou: 0.9471 - dice_loss: 0.0273 - val_loss: 0.2179 - val_acc: 0.9375 - val_iou: 0.5990 - val_dice_loss: 0.2668
Epoch 197/200
73/73 [==============================] - ETA: 0s - loss: 0.0078 - acc: 0.9690 - iou: 0.9456 - dice_loss: 0.0281
Epoch 197: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 231ms/step - loss: 0.0078 - acc: 0.9690 - iou: 0.9456 - dice_loss: 0.0281 - val_loss: 0.2187 - val_acc: 0.9372 - val_iou: 0.5988 - val_dice_loss: 0.2665
Epoch 198/200
73/73 [==============================] - ETA: 0s - loss: 0.0078 - acc: 0.9690 - iou: 0.9447 - dice_loss: 0.0292
Epoch 198: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 228ms/step - loss: 0.0078 - acc: 0.9690 - iou: 0.9447 - dice_loss: 0.0292 - val_loss: 0.2252 - val_acc: 0.9348 - val_iou: 0.6031 - val_dice_loss: 0.2639
Epoch 199/200
73/73 [==============================] - ETA: 0s - loss: 0.0076 - acc: 0.9691 - iou: 0.9481 - dice_loss: 0.0268
Epoch 199: val_loss did not improve from 0.12909
73/73 [==============================] - 17s 233ms/step - loss: 0.0076 - acc: 0.9691 - iou: 0.9481 - dice_loss: 0.0268 - val_loss: 0.2170 - val_acc: 0.9381 - val_iou: 0.6043 - val_dice_loss: 0.2621
Epoch 200/200
73/73 [==============================] - ETA: 0s - loss: 0.0073 - acc: 0.9691 - iou: 0.9515 - dice_loss: 0.0249
Epoch 200: val_loss did not improve from 0.12909
73/73 [==============================] - 18s 241ms/step - loss: 0.0073 - acc: 0.9691 - iou: 0.9515 - dice_loss: 0.0249 - val_loss: 0.2205 - val_acc: 0.9367 - val_iou: 0.6001 - val_dice_loss: 0.2662

Model Performence¶

In [ ]:
plt.figure(figsize=(20,5))

plt.subplot(1,4,1)
plt.title("Model Loss")
plt.plot(history.history['loss'], label="Training")
plt.plot(history.history['val_loss'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,2)
plt.title("Model Accuracy")
plt.plot(history.history['acc'], label="Training")
plt.plot(history.history['val_acc'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,3)
plt.title("IOU")
plt.plot(history.history['iou'], label="Training")
plt.plot(history.history['val_iou'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,4)
plt.title("Model Dice Loss")
plt.plot(history.history['dice_loss'], label="Training")
plt.plot(history.history['val_dice_loss'], label="Validation")
plt.legend()
plt.grid()

plt.show()

Prediction¶

In [ ]:
segnet_pred = seg_model.predict(scan_test)
3/3 [==============================] - 4s 250ms/step
In [ ]:
plt.figure(figsize = (10,60))

i = 0
x = 0
while i < 45 :

    plt.subplot(15,3,i+1)
    plt.imshow(scan_test[x], 'gray')
    plt.title('Real medic Image')
    plt.axis('off')

    plt.subplot(15,3,i+2)
    plt.imshow(mask_test[x], 'gray')
    plt.title('Ground Truth Img')
    plt.axis('off')

    plt.subplot(15,3,i+3)
    plt.imshow(segnet_pred[x], 'gray')
    plt.title('Predicited Image')
    plt.axis('off')

    x += 1
    i += 3
plt.show()

DeepLab¶

Building the model¶

In [ ]:
def greytocolor(img):
  colored_img = img.repeat(3,axis = -1)
  return colored_img
In [ ]:
colored_scan_train = []
for i in range(len(scan_train)):
    colored_img = greytocolor(scan_train[i])
    colored_scan_train.append(colored_img)
colored_scan_train = np.array(colored_scan_train)
In [ ]:
colored_scan_test = []
for i in range(len(scan_test)):
    colored_img = greytocolor(scan_test[i])
    colored_scan_test.append(colored_img)
colored_scan_test = np.array(colored_scan_test)
In [ ]:
class ConvBlock(Layer):

    def __init__(self, filters=256, kernel_size=3, use_bias=False, dilation_rate=1, **kwargs):
        super(ConvBlock, self).__init__(**kwargs)

        self.filters = filters
        self.kernel_size = kernel_size
        self.use_bias = use_bias
        self.dilation_rate = dilation_rate

        self.net = Sequential([
            Conv2D(filters, kernel_size=kernel_size, strides=1, padding='same', dilation_rate=dilation_rate, use_bias=use_bias, kernel_initializer='he_normal'),
            BatchNormalization(),
            ReLU()
        ])

    def call(self, X): return self.net(X)

    def get_config(self):
        base_config = super().get_config()
        return {
            **base_config,
            "filters":self.filters,
            "kernel_size":self.kernel_size,
            "use_bias":self.use_bias,
            "dilation_rate":self.dilation_rate
        }
In [ ]:
def AtrousSpatialPyramidPooling(X):

    # Shapes
    _, height, width, _ = X.shape

    # Image Pooling
    image_pool = AveragePooling2D(pool_size=(height, width), name="ASPP-AvgPool2D")(X)
    image_pool = ConvBlock(kernel_size=1, name="ASPP-ConvBlock-1")(image_pool)
    image_pool = UpSampling2D(size=(height//image_pool.shape[1], width//image_pool.shape[2]), name="ASPP-UpSampling")(image_pool)

    # Conv Blocks
    conv_1 = ConvBlock(kernel_size=1, dilation_rate=1, name="ASPP-Conv-1")(X)
    conv_6 = ConvBlock(kernel_size=3, dilation_rate=6, name="ASPP-Conv-6")(X)
    conv_12 = ConvBlock(kernel_size=3, dilation_rate=12, name="ASPP-Conv-12")(X)
    conv_18 = ConvBlock(kernel_size=3, dilation_rate=18, name="ASPP-Conv-18")(X)

    # Concat All
    concat = Concatenate(axis=-1, name="ASPP-Concat")([image_pool, conv_1, conv_6, conv_12, conv_18])
    net = ConvBlock(kernel_size=1, name="ASPP-Net")(concat)

    return net
In [ ]:
LR = 1e-3
IMAGE_SIZE = 128

# Input
InputL = Input(shape=(128, 128, 3), name="InputLayer")

# Base Mode
resnet50 = ResNet50(include_top=False, weights='imagenet', input_tensor=InputL)

# ASPP Phase
DCNN = resnet50.get_layer('conv4_block6_2_relu').output
ASPP = AtrousSpatialPyramidPooling(DCNN)
ASPP = UpSampling2D(size=(IMAGE_SIZE//4//ASPP.shape[1], IMAGE_SIZE//4//ASPP.shape[2]), name="AtrousSpatial")(ASPP)

# LLF Phase
LLF = resnet50.get_layer('conv2_block3_2_relu').output
LLF = ConvBlock(filters=48, kernel_size=1, name="LLF-ConvBlock")(LLF)

# Combined
combined = Concatenate(axis=-1, name="Combine-LLF-ASPP")([ASPP, LLF])
features = ConvBlock(name="Top-ConvBlock-1")(combined)
features = ConvBlock(name="Top-ConvBlock-2")(features)
upsample = UpSampling2D(size=(IMAGE_SIZE//features.shape[1], IMAGE_SIZE//features.shape[1]), interpolation='bilinear', name="Top-UpSample")(features)

# Output Mask
PredMask = Conv2D(1, kernel_size=3, strides=1, padding='same', activation='sigmoid', use_bias=False, name="OutputMask")(upsample)

# DeelLabV3+ Model
model = Model(InputL, PredMask, name="DeepLabV3-Plus")
model.summary()
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
94765736/94765736 [==============================] - 0s 0us/step
Model: "DeepLabV3-Plus"
__________________________________________________________________________________________________
 Layer (type)                Output Shape                 Param #   Connected to                  
==================================================================================================
 InputLayer (InputLayer)     [(None, 128, 128, 3)]        0         []                            
                                                                                                  
 conv1_pad (ZeroPadding2D)   (None, 134, 134, 3)          0         ['InputLayer[0][0]']          
                                                                                                  
 conv1_conv (Conv2D)         (None, 64, 64, 64)           9472      ['conv1_pad[0][0]']           
                                                                                                  
 conv1_bn (BatchNormalizati  (None, 64, 64, 64)           256       ['conv1_conv[0][0]']          
 on)                                                                                              
                                                                                                  
 conv1_relu (Activation)     (None, 64, 64, 64)           0         ['conv1_bn[0][0]']            
                                                                                                  
 pool1_pad (ZeroPadding2D)   (None, 66, 66, 64)           0         ['conv1_relu[0][0]']          
                                                                                                  
 pool1_pool (MaxPooling2D)   (None, 32, 32, 64)           0         ['pool1_pad[0][0]']           
                                                                                                  
 conv2_block1_1_conv (Conv2  (None, 32, 32, 64)           4160      ['pool1_pool[0][0]']          
 D)                                                                                               
                                                                                                  
 conv2_block1_1_bn (BatchNo  (None, 32, 32, 64)           256       ['conv2_block1_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block1_1_relu (Activ  (None, 32, 32, 64)           0         ['conv2_block1_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv2_block1_2_conv (Conv2  (None, 32, 32, 64)           36928     ['conv2_block1_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv2_block1_2_bn (BatchNo  (None, 32, 32, 64)           256       ['conv2_block1_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block1_2_relu (Activ  (None, 32, 32, 64)           0         ['conv2_block1_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv2_block1_0_conv (Conv2  (None, 32, 32, 256)          16640     ['pool1_pool[0][0]']          
 D)                                                                                               
                                                                                                  
 conv2_block1_3_conv (Conv2  (None, 32, 32, 256)          16640     ['conv2_block1_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv2_block1_0_bn (BatchNo  (None, 32, 32, 256)          1024      ['conv2_block1_0_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block1_3_bn (BatchNo  (None, 32, 32, 256)          1024      ['conv2_block1_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block1_add (Add)      (None, 32, 32, 256)          0         ['conv2_block1_0_bn[0][0]',   
                                                                     'conv2_block1_3_bn[0][0]']   
                                                                                                  
 conv2_block1_out (Activati  (None, 32, 32, 256)          0         ['conv2_block1_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv2_block2_1_conv (Conv2  (None, 32, 32, 64)           16448     ['conv2_block1_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv2_block2_1_bn (BatchNo  (None, 32, 32, 64)           256       ['conv2_block2_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block2_1_relu (Activ  (None, 32, 32, 64)           0         ['conv2_block2_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv2_block2_2_conv (Conv2  (None, 32, 32, 64)           36928     ['conv2_block2_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv2_block2_2_bn (BatchNo  (None, 32, 32, 64)           256       ['conv2_block2_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block2_2_relu (Activ  (None, 32, 32, 64)           0         ['conv2_block2_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv2_block2_3_conv (Conv2  (None, 32, 32, 256)          16640     ['conv2_block2_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv2_block2_3_bn (BatchNo  (None, 32, 32, 256)          1024      ['conv2_block2_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block2_add (Add)      (None, 32, 32, 256)          0         ['conv2_block1_out[0][0]',    
                                                                     'conv2_block2_3_bn[0][0]']   
                                                                                                  
 conv2_block2_out (Activati  (None, 32, 32, 256)          0         ['conv2_block2_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv2_block3_1_conv (Conv2  (None, 32, 32, 64)           16448     ['conv2_block2_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv2_block3_1_bn (BatchNo  (None, 32, 32, 64)           256       ['conv2_block3_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block3_1_relu (Activ  (None, 32, 32, 64)           0         ['conv2_block3_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv2_block3_2_conv (Conv2  (None, 32, 32, 64)           36928     ['conv2_block3_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv2_block3_2_bn (BatchNo  (None, 32, 32, 64)           256       ['conv2_block3_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block3_2_relu (Activ  (None, 32, 32, 64)           0         ['conv2_block3_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv2_block3_3_conv (Conv2  (None, 32, 32, 256)          16640     ['conv2_block3_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv2_block3_3_bn (BatchNo  (None, 32, 32, 256)          1024      ['conv2_block3_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv2_block3_add (Add)      (None, 32, 32, 256)          0         ['conv2_block2_out[0][0]',    
                                                                     'conv2_block3_3_bn[0][0]']   
                                                                                                  
 conv2_block3_out (Activati  (None, 32, 32, 256)          0         ['conv2_block3_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv3_block1_1_conv (Conv2  (None, 16, 16, 128)          32896     ['conv2_block3_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv3_block1_1_bn (BatchNo  (None, 16, 16, 128)          512       ['conv3_block1_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block1_1_relu (Activ  (None, 16, 16, 128)          0         ['conv3_block1_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv3_block1_2_conv (Conv2  (None, 16, 16, 128)          147584    ['conv3_block1_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv3_block1_2_bn (BatchNo  (None, 16, 16, 128)          512       ['conv3_block1_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block1_2_relu (Activ  (None, 16, 16, 128)          0         ['conv3_block1_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv3_block1_0_conv (Conv2  (None, 16, 16, 512)          131584    ['conv2_block3_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv3_block1_3_conv (Conv2  (None, 16, 16, 512)          66048     ['conv3_block1_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv3_block1_0_bn (BatchNo  (None, 16, 16, 512)          2048      ['conv3_block1_0_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block1_3_bn (BatchNo  (None, 16, 16, 512)          2048      ['conv3_block1_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block1_add (Add)      (None, 16, 16, 512)          0         ['conv3_block1_0_bn[0][0]',   
                                                                     'conv3_block1_3_bn[0][0]']   
                                                                                                  
 conv3_block1_out (Activati  (None, 16, 16, 512)          0         ['conv3_block1_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv3_block2_1_conv (Conv2  (None, 16, 16, 128)          65664     ['conv3_block1_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv3_block2_1_bn (BatchNo  (None, 16, 16, 128)          512       ['conv3_block2_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block2_1_relu (Activ  (None, 16, 16, 128)          0         ['conv3_block2_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv3_block2_2_conv (Conv2  (None, 16, 16, 128)          147584    ['conv3_block2_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv3_block2_2_bn (BatchNo  (None, 16, 16, 128)          512       ['conv3_block2_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block2_2_relu (Activ  (None, 16, 16, 128)          0         ['conv3_block2_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv3_block2_3_conv (Conv2  (None, 16, 16, 512)          66048     ['conv3_block2_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv3_block2_3_bn (BatchNo  (None, 16, 16, 512)          2048      ['conv3_block2_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block2_add (Add)      (None, 16, 16, 512)          0         ['conv3_block1_out[0][0]',    
                                                                     'conv3_block2_3_bn[0][0]']   
                                                                                                  
 conv3_block2_out (Activati  (None, 16, 16, 512)          0         ['conv3_block2_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv3_block3_1_conv (Conv2  (None, 16, 16, 128)          65664     ['conv3_block2_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv3_block3_1_bn (BatchNo  (None, 16, 16, 128)          512       ['conv3_block3_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block3_1_relu (Activ  (None, 16, 16, 128)          0         ['conv3_block3_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv3_block3_2_conv (Conv2  (None, 16, 16, 128)          147584    ['conv3_block3_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv3_block3_2_bn (BatchNo  (None, 16, 16, 128)          512       ['conv3_block3_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block3_2_relu (Activ  (None, 16, 16, 128)          0         ['conv3_block3_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv3_block3_3_conv (Conv2  (None, 16, 16, 512)          66048     ['conv3_block3_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv3_block3_3_bn (BatchNo  (None, 16, 16, 512)          2048      ['conv3_block3_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block3_add (Add)      (None, 16, 16, 512)          0         ['conv3_block2_out[0][0]',    
                                                                     'conv3_block3_3_bn[0][0]']   
                                                                                                  
 conv3_block3_out (Activati  (None, 16, 16, 512)          0         ['conv3_block3_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv3_block4_1_conv (Conv2  (None, 16, 16, 128)          65664     ['conv3_block3_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv3_block4_1_bn (BatchNo  (None, 16, 16, 128)          512       ['conv3_block4_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block4_1_relu (Activ  (None, 16, 16, 128)          0         ['conv3_block4_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv3_block4_2_conv (Conv2  (None, 16, 16, 128)          147584    ['conv3_block4_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv3_block4_2_bn (BatchNo  (None, 16, 16, 128)          512       ['conv3_block4_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block4_2_relu (Activ  (None, 16, 16, 128)          0         ['conv3_block4_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv3_block4_3_conv (Conv2  (None, 16, 16, 512)          66048     ['conv3_block4_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv3_block4_3_bn (BatchNo  (None, 16, 16, 512)          2048      ['conv3_block4_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv3_block4_add (Add)      (None, 16, 16, 512)          0         ['conv3_block3_out[0][0]',    
                                                                     'conv3_block4_3_bn[0][0]']   
                                                                                                  
 conv3_block4_out (Activati  (None, 16, 16, 512)          0         ['conv3_block4_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv4_block1_1_conv (Conv2  (None, 8, 8, 256)            131328    ['conv3_block4_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv4_block1_1_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block1_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block1_1_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block1_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block1_2_conv (Conv2  (None, 8, 8, 256)            590080    ['conv4_block1_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block1_2_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block1_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block1_2_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block1_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block1_0_conv (Conv2  (None, 8, 8, 1024)           525312    ['conv3_block4_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv4_block1_3_conv (Conv2  (None, 8, 8, 1024)           263168    ['conv4_block1_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block1_0_bn (BatchNo  (None, 8, 8, 1024)           4096      ['conv4_block1_0_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block1_3_bn (BatchNo  (None, 8, 8, 1024)           4096      ['conv4_block1_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block1_add (Add)      (None, 8, 8, 1024)           0         ['conv4_block1_0_bn[0][0]',   
                                                                     'conv4_block1_3_bn[0][0]']   
                                                                                                  
 conv4_block1_out (Activati  (None, 8, 8, 1024)           0         ['conv4_block1_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv4_block2_1_conv (Conv2  (None, 8, 8, 256)            262400    ['conv4_block1_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv4_block2_1_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block2_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block2_1_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block2_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block2_2_conv (Conv2  (None, 8, 8, 256)            590080    ['conv4_block2_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block2_2_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block2_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block2_2_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block2_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block2_3_conv (Conv2  (None, 8, 8, 1024)           263168    ['conv4_block2_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block2_3_bn (BatchNo  (None, 8, 8, 1024)           4096      ['conv4_block2_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block2_add (Add)      (None, 8, 8, 1024)           0         ['conv4_block1_out[0][0]',    
                                                                     'conv4_block2_3_bn[0][0]']   
                                                                                                  
 conv4_block2_out (Activati  (None, 8, 8, 1024)           0         ['conv4_block2_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv4_block3_1_conv (Conv2  (None, 8, 8, 256)            262400    ['conv4_block2_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv4_block3_1_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block3_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block3_1_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block3_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block3_2_conv (Conv2  (None, 8, 8, 256)            590080    ['conv4_block3_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block3_2_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block3_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block3_2_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block3_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block3_3_conv (Conv2  (None, 8, 8, 1024)           263168    ['conv4_block3_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block3_3_bn (BatchNo  (None, 8, 8, 1024)           4096      ['conv4_block3_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block3_add (Add)      (None, 8, 8, 1024)           0         ['conv4_block2_out[0][0]',    
                                                                     'conv4_block3_3_bn[0][0]']   
                                                                                                  
 conv4_block3_out (Activati  (None, 8, 8, 1024)           0         ['conv4_block3_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv4_block4_1_conv (Conv2  (None, 8, 8, 256)            262400    ['conv4_block3_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv4_block4_1_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block4_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block4_1_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block4_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block4_2_conv (Conv2  (None, 8, 8, 256)            590080    ['conv4_block4_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block4_2_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block4_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block4_2_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block4_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block4_3_conv (Conv2  (None, 8, 8, 1024)           263168    ['conv4_block4_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block4_3_bn (BatchNo  (None, 8, 8, 1024)           4096      ['conv4_block4_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block4_add (Add)      (None, 8, 8, 1024)           0         ['conv4_block3_out[0][0]',    
                                                                     'conv4_block4_3_bn[0][0]']   
                                                                                                  
 conv4_block4_out (Activati  (None, 8, 8, 1024)           0         ['conv4_block4_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv4_block5_1_conv (Conv2  (None, 8, 8, 256)            262400    ['conv4_block4_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv4_block5_1_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block5_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block5_1_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block5_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block5_2_conv (Conv2  (None, 8, 8, 256)            590080    ['conv4_block5_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block5_2_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block5_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block5_2_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block5_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block5_3_conv (Conv2  (None, 8, 8, 1024)           263168    ['conv4_block5_2_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block5_3_bn (BatchNo  (None, 8, 8, 1024)           4096      ['conv4_block5_3_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block5_add (Add)      (None, 8, 8, 1024)           0         ['conv4_block4_out[0][0]',    
                                                                     'conv4_block5_3_bn[0][0]']   
                                                                                                  
 conv4_block5_out (Activati  (None, 8, 8, 1024)           0         ['conv4_block5_add[0][0]']    
 on)                                                                                              
                                                                                                  
 conv4_block6_1_conv (Conv2  (None, 8, 8, 256)            262400    ['conv4_block5_out[0][0]']    
 D)                                                                                               
                                                                                                  
 conv4_block6_1_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block6_1_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block6_1_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block6_1_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 conv4_block6_2_conv (Conv2  (None, 8, 8, 256)            590080    ['conv4_block6_1_relu[0][0]'] 
 D)                                                                                               
                                                                                                  
 conv4_block6_2_bn (BatchNo  (None, 8, 8, 256)            1024      ['conv4_block6_2_conv[0][0]'] 
 rmalization)                                                                                     
                                                                                                  
 conv4_block6_2_relu (Activ  (None, 8, 8, 256)            0         ['conv4_block6_2_bn[0][0]']   
 ation)                                                                                           
                                                                                                  
 ASPP-AvgPool2D (AveragePoo  (None, 1, 1, 256)            0         ['conv4_block6_2_relu[0][0]'] 
 ling2D)                                                                                          
                                                                                                  
 ASPP-ConvBlock-1 (ConvBloc  (None, 1, 1, 256)            66560     ['ASPP-AvgPool2D[0][0]']      
 k)                                                                                               
                                                                                                  
 ASPP-UpSampling (UpSamplin  (None, 8, 8, 256)            0         ['ASPP-ConvBlock-1[0][0]']    
 g2D)                                                                                             
                                                                                                  
 ASPP-Conv-1 (ConvBlock)     (None, 8, 8, 256)            66560     ['conv4_block6_2_relu[0][0]'] 
                                                                                                  
 ASPP-Conv-6 (ConvBlock)     (None, 8, 8, 256)            590848    ['conv4_block6_2_relu[0][0]'] 
                                                                                                  
 ASPP-Conv-12 (ConvBlock)    (None, 8, 8, 256)            590848    ['conv4_block6_2_relu[0][0]'] 
                                                                                                  
 ASPP-Conv-18 (ConvBlock)    (None, 8, 8, 256)            590848    ['conv4_block6_2_relu[0][0]'] 
                                                                                                  
 ASPP-Concat (Concatenate)   (None, 8, 8, 1280)           0         ['ASPP-UpSampling[0][0]',     
                                                                     'ASPP-Conv-1[0][0]',         
                                                                     'ASPP-Conv-6[0][0]',         
                                                                     'ASPP-Conv-12[0][0]',        
                                                                     'ASPP-Conv-18[0][0]']        
                                                                                                  
 ASPP-Net (ConvBlock)        (None, 8, 8, 256)            328704    ['ASPP-Concat[0][0]']         
                                                                                                  
 AtrousSpatial (UpSampling2  (None, 32, 32, 256)          0         ['ASPP-Net[0][0]']            
 D)                                                                                               
                                                                                                  
 LLF-ConvBlock (ConvBlock)   (None, 32, 32, 48)           3264      ['conv2_block3_2_relu[0][0]'] 
                                                                                                  
 Combine-LLF-ASPP (Concaten  (None, 32, 32, 304)          0         ['AtrousSpatial[0][0]',       
 ate)                                                                'LLF-ConvBlock[0][0]']       
                                                                                                  
 Top-ConvBlock-1 (ConvBlock  (None, 32, 32, 256)          701440    ['Combine-LLF-ASPP[0][0]']    
 )                                                                                                
                                                                                                  
 Top-ConvBlock-2 (ConvBlock  (None, 32, 32, 256)          590848    ['Top-ConvBlock-1[0][0]']     
 )                                                                                                
                                                                                                  
 Top-UpSample (UpSampling2D  (None, 128, 128, 256)        0         ['Top-ConvBlock-2[0][0]']     
 )                                                                                                
                                                                                                  
 OutputMask (Conv2D)         (None, 128, 128, 1)          2304      ['Top-UpSample[0][0]']        
                                                                                                  
==================================================================================================
Total params: 11854144 (45.22 MB)
Trainable params: 11821408 (45.10 MB)
Non-trainable params: 32736 (127.88 KB)
__________________________________________________________________________________________________
In [ ]:
plot_model(model, "DeepLabV3+.png", show_shapes=True, dpi=96)
Out[ ]:

Training¶

In [ ]:
optimizer = tf.keras.optimizers.Adam(learning_rate=LR)
model.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=['accuracy',dice_loss,iou])

# Callbacks
callbacks = [ModelCheckpoint("DeepLab_100.h5")]

# Training
history = model.fit(colored_scan_train,mask_train, validation_data = (colored_scan_test, mask_test),
          epochs=100, callbacks=callbacks)
Epoch 1/100
19/19 [==============================] - 50s 589ms/step - loss: 0.2457 - accuracy: 0.8689 - dice_loss: 0.5511 - iou: 0.2998 - val_loss: 0.4959 - val_accuracy: 0.7347 - val_dice_loss: 0.7931 - val_iou: 0.1205
Epoch 2/100
19/19 [==============================] - 5s 269ms/step - loss: 0.1242 - accuracy: 0.9282 - dice_loss: 0.3799 - iou: 0.4521 - val_loss: 0.2839 - val_accuracy: 0.8754 - val_dice_loss: 0.8457 - val_iou: 0.0872
Epoch 3/100
19/19 [==============================] - 5s 273ms/step - loss: 0.0928 - accuracy: 0.9426 - dice_loss: 0.2848 - iou: 0.5581 - val_loss: 0.2492 - val_accuracy: 0.8989 - val_dice_loss: 0.8477 - val_iou: 0.0857
Epoch 4/100
19/19 [==============================] - 5s 276ms/step - loss: 0.0714 - accuracy: 0.9489 - dice_loss: 0.2452 - iou: 0.6130 - val_loss: 0.2729 - val_accuracy: 0.8621 - val_dice_loss: 0.8265 - val_iou: 0.0985
Epoch 5/100
19/19 [==============================] - 5s 272ms/step - loss: 0.0794 - accuracy: 0.9452 - dice_loss: 0.2632 - iou: 0.5859 - val_loss: 0.2507 - val_accuracy: 0.8991 - val_dice_loss: 0.8820 - val_iou: 0.0644
Epoch 6/100
19/19 [==============================] - 7s 361ms/step - loss: 0.0641 - accuracy: 0.9514 - dice_loss: 0.1969 - iou: 0.6715 - val_loss: 0.3199 - val_accuracy: 0.8601 - val_dice_loss: 0.8744 - val_iou: 0.0674
Epoch 7/100
19/19 [==============================] - 7s 392ms/step - loss: 0.0598 - accuracy: 0.9525 - dice_loss: 0.1875 - iou: 0.6846 - val_loss: 0.5168 - val_accuracy: 0.8991 - val_dice_loss: 0.9942 - val_iou: 0.0024
Epoch 8/100
19/19 [==============================] - 7s 363ms/step - loss: 0.0606 - accuracy: 0.9522 - dice_loss: 0.2027 - iou: 0.6658 - val_loss: 0.2699 - val_accuracy: 0.8991 - val_dice_loss: 0.9254 - val_iou: 0.0393
Epoch 9/100
19/19 [==============================] - 6s 296ms/step - loss: 0.0568 - accuracy: 0.9535 - dice_loss: 0.1761 - iou: 0.7013 - val_loss: 0.3418 - val_accuracy: 0.8991 - val_dice_loss: 0.9619 - val_iou: 0.0192
Epoch 10/100
19/19 [==============================] - 6s 322ms/step - loss: 0.0594 - accuracy: 0.9537 - dice_loss: 0.1897 - iou: 0.6834 - val_loss: 0.3388 - val_accuracy: 0.8991 - val_dice_loss: 0.9622 - val_iou: 0.0191
Epoch 11/100
19/19 [==============================] - 6s 316ms/step - loss: 0.0539 - accuracy: 0.9544 - dice_loss: 0.1852 - iou: 0.6916 - val_loss: 0.4879 - val_accuracy: 0.8991 - val_dice_loss: 0.9925 - val_iou: 0.0033
Epoch 12/100
19/19 [==============================] - 6s 299ms/step - loss: 0.0589 - accuracy: 0.9527 - dice_loss: 0.1944 - iou: 0.6774 - val_loss: 0.4967 - val_accuracy: 0.6598 - val_dice_loss: 0.8199 - val_iou: 0.1033
Epoch 13/100
19/19 [==============================] - 6s 332ms/step - loss: 0.0501 - accuracy: 0.9561 - dice_loss: 0.1562 - iou: 0.7302 - val_loss: 0.3553 - val_accuracy: 0.7577 - val_dice_loss: 0.8100 - val_iou: 0.1102
Epoch 14/100
19/19 [==============================] - 5s 268ms/step - loss: 0.0398 - accuracy: 0.9593 - dice_loss: 0.1270 - iou: 0.7749 - val_loss: 0.2808 - val_accuracy: 0.8589 - val_dice_loss: 0.8201 - val_iou: 0.1032
Epoch 15/100
19/19 [==============================] - 5s 278ms/step - loss: 0.0348 - accuracy: 0.9607 - dice_loss: 0.1133 - iou: 0.7969 - val_loss: 0.2608 - val_accuracy: 0.8988 - val_dice_loss: 0.8509 - val_iou: 0.0837
Epoch 16/100
19/19 [==============================] - 5s 275ms/step - loss: 0.0330 - accuracy: 0.9615 - dice_loss: 0.1058 - iou: 0.8089 - val_loss: 0.7139 - val_accuracy: 0.5877 - val_dice_loss: 0.8137 - val_iou: 0.1069
Epoch 17/100
19/19 [==============================] - 5s 268ms/step - loss: 0.0304 - accuracy: 0.9622 - dice_loss: 0.0956 - iou: 0.8256 - val_loss: 0.4891 - val_accuracy: 0.7021 - val_dice_loss: 0.7969 - val_iou: 0.1187
Epoch 18/100
19/19 [==============================] - 5s 284ms/step - loss: 0.0274 - accuracy: 0.9633 - dice_loss: 0.0925 - iou: 0.8317 - val_loss: 0.3934 - val_accuracy: 0.7481 - val_dice_loss: 0.7951 - val_iou: 0.1203
Epoch 19/100
19/19 [==============================] - 6s 297ms/step - loss: 0.0274 - accuracy: 0.9632 - dice_loss: 0.0902 - iou: 0.8346 - val_loss: 0.4453 - val_accuracy: 0.7234 - val_dice_loss: 0.7919 - val_iou: 0.1220
Epoch 20/100
19/19 [==============================] - 10s 561ms/step - loss: 0.0249 - accuracy: 0.9640 - dice_loss: 0.0800 - iou: 0.8521 - val_loss: 0.3924 - val_accuracy: 0.7626 - val_dice_loss: 0.7847 - val_iou: 0.1273
Epoch 21/100
19/19 [==============================] - 5s 276ms/step - loss: 0.0352 - accuracy: 0.9607 - dice_loss: 0.1095 - iou: 0.8030 - val_loss: 0.3424 - val_accuracy: 0.7721 - val_dice_loss: 0.7874 - val_iou: 0.1252
Epoch 22/100
19/19 [==============================] - 5s 286ms/step - loss: 0.0361 - accuracy: 0.9607 - dice_loss: 0.1190 - iou: 0.7899 - val_loss: 0.4270 - val_accuracy: 0.7256 - val_dice_loss: 0.7831 - val_iou: 0.1277
Epoch 23/100
19/19 [==============================] - 5s 272ms/step - loss: 0.0377 - accuracy: 0.9602 - dice_loss: 0.1206 - iou: 0.7852 - val_loss: 0.2409 - val_accuracy: 0.8919 - val_dice_loss: 0.8051 - val_iou: 0.1122
Epoch 24/100
19/19 [==============================] - 5s 270ms/step - loss: 0.0367 - accuracy: 0.9605 - dice_loss: 0.1168 - iou: 0.7915 - val_loss: 0.3029 - val_accuracy: 0.8571 - val_dice_loss: 0.8309 - val_iou: 0.0967
Epoch 25/100
19/19 [==============================] - 6s 293ms/step - loss: 0.0697 - accuracy: 0.9494 - dice_loss: 0.2003 - iou: 0.6688 - val_loss: 0.5844 - val_accuracy: 0.5722 - val_dice_loss: 0.8184 - val_iou: 0.1022
Epoch 26/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0576 - accuracy: 0.9531 - dice_loss: 0.1798 - iou: 0.6958 - val_loss: 0.2985 - val_accuracy: 0.8387 - val_dice_loss: 0.7998 - val_iou: 0.1142
Epoch 27/100
19/19 [==============================] - 5s 293ms/step - loss: 0.0411 - accuracy: 0.9594 - dice_loss: 0.1284 - iou: 0.7729 - val_loss: 0.2817 - val_accuracy: 0.8959 - val_dice_loss: 0.8976 - val_iou: 0.0540
Epoch 28/100
19/19 [==============================] - 5s 279ms/step - loss: 0.0364 - accuracy: 0.9604 - dice_loss: 0.1181 - iou: 0.7894 - val_loss: 0.4065 - val_accuracy: 0.8991 - val_dice_loss: 0.9777 - val_iou: 0.0109
Epoch 29/100
19/19 [==============================] - 5s 270ms/step - loss: 0.0353 - accuracy: 0.9606 - dice_loss: 0.1148 - iou: 0.7943 - val_loss: 0.4763 - val_accuracy: 0.8991 - val_dice_loss: 0.9885 - val_iou: 0.0053
Epoch 30/100
19/19 [==============================] - 5s 291ms/step - loss: 0.0529 - accuracy: 0.9555 - dice_loss: 0.1682 - iou: 0.7191 - val_loss: 0.2552 - val_accuracy: 0.8805 - val_dice_loss: 0.7966 - val_iou: 0.1169
Epoch 31/100
19/19 [==============================] - 5s 270ms/step - loss: 0.0450 - accuracy: 0.9573 - dice_loss: 0.1489 - iou: 0.7420 - val_loss: 0.2462 - val_accuracy: 0.8781 - val_dice_loss: 0.7974 - val_iou: 0.1154
Epoch 32/100
19/19 [==============================] - 5s 276ms/step - loss: 0.0459 - accuracy: 0.9572 - dice_loss: 0.1389 - iou: 0.7565 - val_loss: 0.3433 - val_accuracy: 0.8991 - val_dice_loss: 0.9544 - val_iou: 0.0232
Epoch 33/100
19/19 [==============================] - 5s 281ms/step - loss: 0.0341 - accuracy: 0.9609 - dice_loss: 0.1107 - iou: 0.8008 - val_loss: 0.4407 - val_accuracy: 0.8991 - val_dice_loss: 0.9858 - val_iou: 0.0066
Epoch 34/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0291 - accuracy: 0.9624 - dice_loss: 0.0975 - iou: 0.8228 - val_loss: 0.5510 - val_accuracy: 0.8991 - val_dice_loss: 0.9940 - val_iou: 0.0024
Epoch 35/100
19/19 [==============================] - 5s 282ms/step - loss: 0.0273 - accuracy: 0.9632 - dice_loss: 0.0904 - iou: 0.8343 - val_loss: 0.5070 - val_accuracy: 0.8999 - val_dice_loss: 0.9783 - val_iou: 0.0105
Epoch 36/100
19/19 [==============================] - 5s 271ms/step - loss: 0.0250 - accuracy: 0.9637 - dice_loss: 0.0816 - iou: 0.8494 - val_loss: 0.4983 - val_accuracy: 0.8996 - val_dice_loss: 0.9806 - val_iou: 0.0092
Epoch 37/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0236 - accuracy: 0.9644 - dice_loss: 0.0782 - iou: 0.8550 - val_loss: 0.4421 - val_accuracy: 0.9000 - val_dice_loss: 0.9640 - val_iou: 0.0179
Epoch 38/100
19/19 [==============================] - 5s 275ms/step - loss: 0.0216 - accuracy: 0.9648 - dice_loss: 0.0691 - iou: 0.8708 - val_loss: 0.4681 - val_accuracy: 0.9002 - val_dice_loss: 0.9605 - val_iou: 0.0198
Epoch 39/100
19/19 [==============================] - 10s 563ms/step - loss: 0.0203 - accuracy: 0.9652 - dice_loss: 0.0690 - iou: 0.8716 - val_loss: 0.4501 - val_accuracy: 0.9007 - val_dice_loss: 0.9488 - val_iou: 0.0260
Epoch 40/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0362 - accuracy: 0.9608 - dice_loss: 0.1016 - iou: 0.8163 - val_loss: 0.5047 - val_accuracy: 0.8773 - val_dice_loss: 0.7434 - val_iou: 0.1480
Epoch 41/100
19/19 [==============================] - 6s 294ms/step - loss: 0.0358 - accuracy: 0.9606 - dice_loss: 0.1127 - iou: 0.7980 - val_loss: 0.3376 - val_accuracy: 0.8982 - val_dice_loss: 0.8806 - val_iou: 0.0636
Epoch 42/100
19/19 [==============================] - 5s 288ms/step - loss: 0.0303 - accuracy: 0.9622 - dice_loss: 0.0987 - iou: 0.8206 - val_loss: 0.2533 - val_accuracy: 0.9019 - val_dice_loss: 0.8453 - val_iou: 0.0866
Epoch 43/100
19/19 [==============================] - 6s 328ms/step - loss: 0.0269 - accuracy: 0.9633 - dice_loss: 0.0837 - iou: 0.8456 - val_loss: 0.2748 - val_accuracy: 0.9068 - val_dice_loss: 0.7298 - val_iou: 0.1564
Epoch 44/100
19/19 [==============================] - 6s 338ms/step - loss: 0.0240 - accuracy: 0.9642 - dice_loss: 0.0841 - iou: 0.8459 - val_loss: 0.3602 - val_accuracy: 0.9066 - val_dice_loss: 0.8239 - val_iou: 0.1000
Epoch 45/100
19/19 [==============================] - 5s 271ms/step - loss: 0.0371 - accuracy: 0.9605 - dice_loss: 0.1177 - iou: 0.7905 - val_loss: 0.2773 - val_accuracy: 0.9087 - val_dice_loss: 0.6740 - val_iou: 0.1955
Epoch 46/100
19/19 [==============================] - 5s 272ms/step - loss: 0.0745 - accuracy: 0.9486 - dice_loss: 0.2244 - iou: 0.6484 - val_loss: 0.2178 - val_accuracy: 0.8918 - val_dice_loss: 0.6614 - val_iou: 0.2081
Epoch 47/100
19/19 [==============================] - 5s 283ms/step - loss: 0.0801 - accuracy: 0.9455 - dice_loss: 0.2654 - iou: 0.5843 - val_loss: 0.1977 - val_accuracy: 0.9116 - val_dice_loss: 0.5845 - val_iou: 0.2729
Epoch 48/100
19/19 [==============================] - 5s 268ms/step - loss: 0.0438 - accuracy: 0.9582 - dice_loss: 0.1349 - iou: 0.7625 - val_loss: 0.1715 - val_accuracy: 0.9235 - val_dice_loss: 0.5141 - val_iou: 0.3255
Epoch 49/100
19/19 [==============================] - 11s 586ms/step - loss: 0.0380 - accuracy: 0.9598 - dice_loss: 0.1184 - iou: 0.7886 - val_loss: 0.1809 - val_accuracy: 0.9248 - val_dice_loss: 0.4555 - val_iou: 0.3767
Epoch 50/100
19/19 [==============================] - 5s 268ms/step - loss: 0.0321 - accuracy: 0.9617 - dice_loss: 0.1015 - iou: 0.8159 - val_loss: 0.1726 - val_accuracy: 0.9267 - val_dice_loss: 0.4686 - val_iou: 0.3738
Epoch 51/100
19/19 [==============================] - 10s 536ms/step - loss: 0.0261 - accuracy: 0.9635 - dice_loss: 0.0879 - iou: 0.8388 - val_loss: 0.1857 - val_accuracy: 0.9291 - val_dice_loss: 0.4660 - val_iou: 0.3802
Epoch 52/100
19/19 [==============================] - 5s 274ms/step - loss: 0.0283 - accuracy: 0.9631 - dice_loss: 0.0870 - iou: 0.8405 - val_loss: 0.2408 - val_accuracy: 0.9309 - val_dice_loss: 0.3961 - val_iou: 0.4329
Epoch 53/100
19/19 [==============================] - 5s 274ms/step - loss: 0.0346 - accuracy: 0.9609 - dice_loss: 0.1110 - iou: 0.8018 - val_loss: 0.2408 - val_accuracy: 0.9189 - val_dice_loss: 0.4744 - val_iou: 0.3599
Epoch 54/100
19/19 [==============================] - 5s 273ms/step - loss: 0.0297 - accuracy: 0.9622 - dice_loss: 0.0938 - iou: 0.8286 - val_loss: 0.1963 - val_accuracy: 0.9280 - val_dice_loss: 0.4823 - val_iou: 0.3658
Epoch 55/100
19/19 [==============================] - 6s 297ms/step - loss: 0.0291 - accuracy: 0.9624 - dice_loss: 0.0954 - iou: 0.8264 - val_loss: 0.2062 - val_accuracy: 0.9282 - val_dice_loss: 0.4167 - val_iou: 0.4124
Epoch 56/100
19/19 [==============================] - 5s 274ms/step - loss: 0.0248 - accuracy: 0.9639 - dice_loss: 0.0810 - iou: 0.8502 - val_loss: 0.1871 - val_accuracy: 0.9358 - val_dice_loss: 0.4424 - val_iou: 0.4019
Epoch 57/100
19/19 [==============================] - 9s 463ms/step - loss: 0.0217 - accuracy: 0.9649 - dice_loss: 0.0710 - iou: 0.8675 - val_loss: 0.1677 - val_accuracy: 0.9380 - val_dice_loss: 0.3799 - val_iou: 0.4611
Epoch 58/100
19/19 [==============================] - 5s 265ms/step - loss: 0.0211 - accuracy: 0.9653 - dice_loss: 0.0670 - iou: 0.8745 - val_loss: 0.1820 - val_accuracy: 0.9385 - val_dice_loss: 0.3661 - val_iou: 0.4703
Epoch 59/100
19/19 [==============================] - 5s 278ms/step - loss: 0.0205 - accuracy: 0.9653 - dice_loss: 0.0649 - iou: 0.8782 - val_loss: 0.1636 - val_accuracy: 0.9399 - val_dice_loss: 0.3222 - val_iou: 0.5142
Epoch 60/100
19/19 [==============================] - 5s 270ms/step - loss: 0.0186 - accuracy: 0.9658 - dice_loss: 0.0611 - iou: 0.8850 - val_loss: 0.1784 - val_accuracy: 0.9390 - val_dice_loss: 0.3176 - val_iou: 0.5183
Epoch 61/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0180 - accuracy: 0.9659 - dice_loss: 0.0603 - iou: 0.8864 - val_loss: 0.1771 - val_accuracy: 0.9397 - val_dice_loss: 0.3724 - val_iou: 0.4670
Epoch 62/100
19/19 [==============================] - 6s 312ms/step - loss: 0.0195 - accuracy: 0.9656 - dice_loss: 0.0597 - iou: 0.8874 - val_loss: 0.1461 - val_accuracy: 0.9422 - val_dice_loss: 0.3834 - val_iou: 0.4671
Epoch 63/100
19/19 [==============================] - 5s 270ms/step - loss: 0.0183 - accuracy: 0.9658 - dice_loss: 0.0623 - iou: 0.8835 - val_loss: 0.1763 - val_accuracy: 0.9388 - val_dice_loss: 0.3055 - val_iou: 0.5320
Epoch 64/100
19/19 [==============================] - 5s 268ms/step - loss: 0.0256 - accuracy: 0.9637 - dice_loss: 0.0747 - iou: 0.8612 - val_loss: 0.1892 - val_accuracy: 0.9348 - val_dice_loss: 0.4250 - val_iou: 0.4159
Epoch 65/100
19/19 [==============================] - 11s 592ms/step - loss: 0.0247 - accuracy: 0.9639 - dice_loss: 0.0794 - iou: 0.8530 - val_loss: 0.1727 - val_accuracy: 0.9379 - val_dice_loss: 0.4047 - val_iou: 0.4427
Epoch 66/100
19/19 [==============================] - 6s 292ms/step - loss: 0.0223 - accuracy: 0.9648 - dice_loss: 0.0718 - iou: 0.8662 - val_loss: 0.1734 - val_accuracy: 0.9411 - val_dice_loss: 0.3799 - val_iou: 0.4688
Epoch 67/100
19/19 [==============================] - 5s 270ms/step - loss: 0.0193 - accuracy: 0.9656 - dice_loss: 0.0621 - iou: 0.8832 - val_loss: 0.1819 - val_accuracy: 0.9416 - val_dice_loss: 0.3904 - val_iou: 0.4620
Epoch 68/100
19/19 [==============================] - 5s 279ms/step - loss: 0.0171 - accuracy: 0.9663 - dice_loss: 0.0554 - iou: 0.8952 - val_loss: 0.1651 - val_accuracy: 0.9434 - val_dice_loss: 0.3673 - val_iou: 0.4887
Epoch 69/100
19/19 [==============================] - 5s 282ms/step - loss: 0.0164 - accuracy: 0.9665 - dice_loss: 0.0534 - iou: 0.8986 - val_loss: 0.1797 - val_accuracy: 0.9427 - val_dice_loss: 0.3688 - val_iou: 0.4821
Epoch 70/100
19/19 [==============================] - 5s 271ms/step - loss: 0.0150 - accuracy: 0.9669 - dice_loss: 0.0526 - iou: 0.9003 - val_loss: 0.1884 - val_accuracy: 0.9428 - val_dice_loss: 0.3550 - val_iou: 0.4935
Epoch 71/100
19/19 [==============================] - 5s 291ms/step - loss: 0.0164 - accuracy: 0.9667 - dice_loss: 0.0497 - iou: 0.9053 - val_loss: 0.1477 - val_accuracy: 0.9411 - val_dice_loss: 0.3693 - val_iou: 0.4837
Epoch 72/100
19/19 [==============================] - 5s 274ms/step - loss: 0.0150 - accuracy: 0.9669 - dice_loss: 0.0480 - iou: 0.9085 - val_loss: 0.1598 - val_accuracy: 0.9417 - val_dice_loss: 0.3685 - val_iou: 0.4829
Epoch 73/100
19/19 [==============================] - 5s 283ms/step - loss: 0.0139 - accuracy: 0.9672 - dice_loss: 0.0460 - iou: 0.9120 - val_loss: 0.1566 - val_accuracy: 0.9423 - val_dice_loss: 0.3616 - val_iou: 0.4919
Epoch 74/100
19/19 [==============================] - 5s 287ms/step - loss: 0.0139 - accuracy: 0.9673 - dice_loss: 0.0444 - iou: 0.9150 - val_loss: 0.1789 - val_accuracy: 0.9406 - val_dice_loss: 0.3674 - val_iou: 0.4792
Epoch 75/100
19/19 [==============================] - 5s 271ms/step - loss: 0.0141 - accuracy: 0.9672 - dice_loss: 0.0439 - iou: 0.9159 - val_loss: 0.1672 - val_accuracy: 0.9416 - val_dice_loss: 0.3613 - val_iou: 0.4901
Epoch 76/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0133 - accuracy: 0.9674 - dice_loss: 0.0426 - iou: 0.9183 - val_loss: 0.1698 - val_accuracy: 0.9417 - val_dice_loss: 0.3469 - val_iou: 0.5004
Epoch 77/100
19/19 [==============================] - 5s 275ms/step - loss: 0.0134 - accuracy: 0.9674 - dice_loss: 0.0422 - iou: 0.9190 - val_loss: 0.1836 - val_accuracy: 0.9408 - val_dice_loss: 0.3645 - val_iou: 0.4829
Epoch 78/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0139 - accuracy: 0.9672 - dice_loss: 0.0434 - iou: 0.9169 - val_loss: 0.1956 - val_accuracy: 0.9407 - val_dice_loss: 0.3701 - val_iou: 0.4765
Epoch 79/100
19/19 [==============================] - 11s 583ms/step - loss: 0.0135 - accuracy: 0.9674 - dice_loss: 0.0426 - iou: 0.9183 - val_loss: 0.1847 - val_accuracy: 0.9406 - val_dice_loss: 0.3517 - val_iou: 0.4960
Epoch 80/100
19/19 [==============================] - 5s 268ms/step - loss: 0.0143 - accuracy: 0.9672 - dice_loss: 0.0442 - iou: 0.9154 - val_loss: 0.1758 - val_accuracy: 0.9394 - val_dice_loss: 0.3771 - val_iou: 0.4759
Epoch 81/100
19/19 [==============================] - 5s 275ms/step - loss: 0.0133 - accuracy: 0.9674 - dice_loss: 0.0420 - iou: 0.9194 - val_loss: 0.1732 - val_accuracy: 0.9411 - val_dice_loss: 0.3597 - val_iou: 0.4927
Epoch 82/100
19/19 [==============================] - 5s 270ms/step - loss: 0.0151 - accuracy: 0.9670 - dice_loss: 0.0454 - iou: 0.9131 - val_loss: 0.2028 - val_accuracy: 0.9394 - val_dice_loss: 0.3404 - val_iou: 0.5002
Epoch 83/100
19/19 [==============================] - 5s 284ms/step - loss: 0.0138 - accuracy: 0.9673 - dice_loss: 0.0478 - iou: 0.9091 - val_loss: 0.2015 - val_accuracy: 0.9410 - val_dice_loss: 0.3533 - val_iou: 0.4927
Epoch 84/100
19/19 [==============================] - 5s 277ms/step - loss: 0.0161 - accuracy: 0.9665 - dice_loss: 0.0488 - iou: 0.9070 - val_loss: 0.1615 - val_accuracy: 0.9373 - val_dice_loss: 0.3894 - val_iou: 0.4649
Epoch 85/100
19/19 [==============================] - 5s 270ms/step - loss: 0.0176 - accuracy: 0.9661 - dice_loss: 0.0546 - iou: 0.8965 - val_loss: 0.1766 - val_accuracy: 0.9358 - val_dice_loss: 0.3193 - val_iou: 0.5210
Epoch 86/100
19/19 [==============================] - 5s 283ms/step - loss: 0.0165 - accuracy: 0.9664 - dice_loss: 0.0518 - iou: 0.9017 - val_loss: 0.1836 - val_accuracy: 0.9325 - val_dice_loss: 0.3943 - val_iou: 0.4547
Epoch 87/100
19/19 [==============================] - 5s 279ms/step - loss: 0.0147 - accuracy: 0.9670 - dice_loss: 0.0486 - iou: 0.9075 - val_loss: 0.1681 - val_accuracy: 0.9394 - val_dice_loss: 0.3629 - val_iou: 0.4857
Epoch 88/100
19/19 [==============================] - 5s 289ms/step - loss: 0.0128 - accuracy: 0.9676 - dice_loss: 0.0417 - iou: 0.9199 - val_loss: 0.1782 - val_accuracy: 0.9408 - val_dice_loss: 0.3585 - val_iou: 0.4938
Epoch 89/100
19/19 [==============================] - 5s 284ms/step - loss: 0.0122 - accuracy: 0.9677 - dice_loss: 0.0395 - iou: 0.9240 - val_loss: 0.1874 - val_accuracy: 0.9401 - val_dice_loss: 0.3602 - val_iou: 0.4908
Epoch 90/100
19/19 [==============================] - 5s 268ms/step - loss: 0.0127 - accuracy: 0.9676 - dice_loss: 0.0497 - iou: 0.9089 - val_loss: 0.1735 - val_accuracy: 0.9412 - val_dice_loss: 0.3649 - val_iou: 0.4853
Epoch 91/100
19/19 [==============================] - 5s 268ms/step - loss: 0.0195 - accuracy: 0.9656 - dice_loss: 0.0595 - iou: 0.8878 - val_loss: 0.2044 - val_accuracy: 0.9326 - val_dice_loss: 0.4010 - val_iou: 0.4512
Epoch 92/100
19/19 [==============================] - 5s 267ms/step - loss: 0.0183 - accuracy: 0.9659 - dice_loss: 0.0585 - iou: 0.8897 - val_loss: 0.1992 - val_accuracy: 0.9390 - val_dice_loss: 0.3714 - val_iou: 0.4791
Epoch 93/100
19/19 [==============================] - 5s 285ms/step - loss: 0.0180 - accuracy: 0.9660 - dice_loss: 0.0559 - iou: 0.8942 - val_loss: 0.2031 - val_accuracy: 0.9355 - val_dice_loss: 0.3710 - val_iou: 0.4781
Epoch 94/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0340 - accuracy: 0.9613 - dice_loss: 0.0991 - iou: 0.8209 - val_loss: 0.3002 - val_accuracy: 0.9222 - val_dice_loss: 0.3261 - val_iou: 0.5308
Epoch 95/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0437 - accuracy: 0.9580 - dice_loss: 0.1290 - iou: 0.7721 - val_loss: 0.2505 - val_accuracy: 0.8998 - val_dice_loss: 0.4137 - val_iou: 0.4187
Epoch 96/100
19/19 [==============================] - 5s 273ms/step - loss: 0.0412 - accuracy: 0.9596 - dice_loss: 0.1242 - iou: 0.7799 - val_loss: 0.2464 - val_accuracy: 0.9316 - val_dice_loss: 0.3443 - val_iou: 0.4895
Epoch 97/100
19/19 [==============================] - 5s 277ms/step - loss: 0.0311 - accuracy: 0.9620 - dice_loss: 0.1029 - iou: 0.8143 - val_loss: 0.1673 - val_accuracy: 0.9389 - val_dice_loss: 0.2589 - val_iou: 0.5890
Epoch 98/100
19/19 [==============================] - 5s 293ms/step - loss: 0.0255 - accuracy: 0.9638 - dice_loss: 0.0805 - iou: 0.8511 - val_loss: 0.1766 - val_accuracy: 0.9414 - val_dice_loss: 0.2113 - val_iou: 0.6549
Epoch 99/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0203 - accuracy: 0.9653 - dice_loss: 0.0646 - iou: 0.8787 - val_loss: 0.1571 - val_accuracy: 0.9417 - val_dice_loss: 0.2090 - val_iou: 0.6569
Epoch 100/100
19/19 [==============================] - 5s 269ms/step - loss: 0.0173 - accuracy: 0.9662 - dice_loss: 0.0575 - iou: 0.8915 - val_loss: 0.1559 - val_accuracy: 0.9419 - val_dice_loss: 0.2243 - val_iou: 0.6345

Model Performence¶

In [ ]:
plt.figure(figsize=(20,5))

plt.subplot(1,4,1)
plt.title("Model Loss")
plt.plot(history.history['loss'], label="Training")
plt.plot(history.history['val_loss'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,2)
plt.title("Model Accuracy")
plt.plot(history.history['accuracy'], label="Training")
plt.plot(history.history['val_accuracy'], label="Validation")
plt.legend()
plt.grid()

plt.subplot(1,4,3)
plt.title("IOU")
plt.plot(history.history['iou'], label="Training")
plt.plot(history.history['val_iou'], label="Validation")
plt.legend()
plt.grid()

Prediction¶

In [ ]:
deep3_pred = model.predict(colored_scan_test)
3/3 [==============================] - 2s 93ms/step
In [ ]:
plt.figure(figsize = (10,60))

i = 0
x = 0
while i < 45 :

    plt.subplot(15,3,i+1)
    plt.imshow(scan_test[x], 'gray')
    plt.title('Real medic Image')
    plt.axis('off')

    plt.subplot(15,3,i+2)
    plt.imshow(mask_test[x], 'gray')
    plt.title('Ground Truth Img')
    plt.axis('off')

    plt.subplot(15,3,i+3)
    plt.imshow(deep3_pred[x], 'gray')
    plt.title('Predicited Image')
    plt.axis('off')

    x += 1
    i += 3
plt.show()

Trying ensemble¶

Loading the models¶

In [ ]:
unet = load_model('/content/drive/MyDrive/breast cancer models/unet_70_v3.h5',
                  custom_objects={'dice_loss': dice_loss,"iou":iou})
#unet_70_v3 performing better
In [ ]:
unet_pred = unet.predict(scan_test)
3/3 [==============================] - 9s 745ms/step
In [ ]:
att_unet = load_model('/content/drive/MyDrive/breast cancer models/AttentionUNet_30.h5',
                      custom_objects={'dice_loss': dice_loss,"iou":iou,
                                      'EncoderBlock': EncoderBlock,'AttentionGate': AttentionGate,
                                      'DecoderBlock':DecoderBlock
                                      })
#v1 better iou here but v4 better training
In [ ]:
att_pred = att_unet.predict(scan_test)
3/3 [==============================] - 6s 795ms/step
In [ ]:
att_pred.shape
Out[ ]:
(65, 128, 128, 1)
In [ ]:
res_unet = load_model('/content/drive/MyDrive/breast cancer models/resunet_60_v3.h5',custom_objects={'dice_loss': dice_loss,"iou":iou})
In [ ]:
res_unet_pred = res_unet.predict(scan_test)
3/3 [==============================] - 5s 585ms/step
In [ ]:
custom_objects = {
    'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,
    'MaxUnpooling2D': MaxUnpooling2D,
    'dice_loss': dice_loss,
    'iou': iou
}

seg_net = load_model('/content/drive/MyDrive/breast cancer models/segnet_200_v3.h5', custom_objects=custom_objects)
In [ ]:
seg_net_pred = seg_net.predict(scan_test)
3/3 [==============================] - 7s 780ms/step
In [ ]:
def greytocolor(img):
  colored_img = img.repeat(3,axis = -1)
  return colored_img
In [ ]:
colored_scan_test = []
for i in range(len(scan_test)):
    colored_img = greytocolor(scan_test[i])
    colored_scan_test.append(colored_img)
colored_scan_test = np.array(colored_scan_test)
In [ ]:
deep3_model = load_model('/content/drive/MyDrive/breast cancer models/DeepLab_100.h5',
                         custom_objects = {'ConvBlock' :ConvBlock,
                                           'AtrousSpatialPyramidPooling':AtrousSpatialPyramidPooling,
                                           'dice_loss': dice_loss,
                                          'iou': iou
                                           })
In [ ]:
deep3_pred = deep3_model.predict(colored_scan_test)
WARNING:tensorflow:5 out of the last 13 calls to <function Model.make_predict_function.<locals>.predict_function at 0x789737500700> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
3/3 [==============================] - 7s 493ms/step

Evaluation Metrics¶

In [ ]:
def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    y_true_f = tf.reshape(y_true, [-1])
    y_pred_f = tf.reshape(y_pred, [-1])

    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    union = tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f)

    # Check if both masks are empty (union is 0)
    if union == 0:
        score = 1.0  # Dice coefficient smoothing for empty masks
    else:
        score = (2. * intersection + smooth) / (union + smooth)

    return score
In [ ]:
def iou(true_mask,predicted_mask):
    intersection = np.sum(predicted_mask * true_mask)
    union = np.sum(predicted_mask) + np.sum(true_mask) - intersection

    if union == 0:
        iou = 1.0  # IoU smoothing
    else:
        iou = intersection / union

    return iou

Calculating iou and dice loss for each individual model¶

Deep3 Lab¶

In [ ]:
iou_scores = []
dice_scores = []

for i in range(len(scan_test)):
    iou_value = iou(deep3_pred[i],mask_test[i])

    dice = dice_coeff(deep3_pred[i],mask_test[i])
    iou_scores.append(iou_value)
    dice_scores.append(dice)

mean_iou_deep3 = np.nanmean(iou_scores)
mean_dice_deep3 = np.mean(dice_scores)

print(f"Mean IoU: {mean_iou_deep3}")
print(f"Mean Dice Coefiicient: {mean_dice_deep3}")
Mean IoU: 0.805148529868843
Mean Dice Coefiicient: 0.8799166679382324
In [ ]:
iou_scores = []
dice_scores = []

for i in range(len(scan_test)):
    iou_value = iou(deep3_pred[i],mask_test[i])

    dice = dice_coeff(deep3_pred[i],mask_test[i])
    iou_scores.append(iou_value)
    dice_scores.append(dice)

mean_iou_deep3 = np.nanmean(iou_scores)
mean_dice_deep3 = np.mean(dice_scores)

print(f"Mean IoU: {mean_iou_deep3}")
print(f"Mean Dice Coefiicient: {mean_dice_deep3}")
Mean IoU: 0.829233651685735
Mean Dice Coefiicient: 0.9006853103637695

Res_unet¶

In [ ]:
iou_scores = []
dice_scores = []

for i in range(len(scan_test)):
    iou_value = iou(res_unet_pred[i],mask_test[i])

    dice = dice_coeff(res_unet_pred[i],mask_test[i])
    iou_scores.append(iou_value)
    dice_scores.append(dice)

# Calculate the mean IoU and Dice coefficient scores
mean_iou_resunet = np.nanmean(iou_scores)  # Use np.nanmean to handle NaN values
mean_dice_resunet = np.mean(dice_scores)

print(f"Mean IoU: {mean_iou_resunet}")
print(f"Mean Dice Coefiicient: {mean_dice_resunet}")
Mean IoU: 0.5332620819679764
Mean Dice Coefiicient: 0.6529447436332703

Attention unet¶

In [ ]:
iou_scores = []
dice_scores = []

for i in range(len(scan_test)):
    iou_value = iou(att_pred[i],mask_test[i])

    dice = dice_coeff(att_pred[i],mask_test[i])
    iou_scores.append(iou_value)
    dice_scores.append(dice)

# Calculate the mean IoU and Dice coefficient scores
mean_iou_att = np.nanmean(iou_scores)  # Use np.nanmean to handle NaN values
mean_dice_att = np.mean(dice_scores)

print(f"Mean IoU: {mean_iou_att}")
print(f"Mean Dice Coefiicient: {mean_dice_att}")
Mean IoU: 0.6327860824300322
Mean Dice Coefiicient: 0.7562053799629211

Unet¶

In [ ]:
iou_scores = []
dice_scores = []

for i in range(len(scan_test)):
    iou_value = iou(unet_pred[i],mask_test[i])

    dice = dice_coeff(unet_pred[i],mask_test[i])
    iou_scores.append(iou_value)
    dice_scores.append(dice)

# Calculate the mean IoU and Dice coefficient scores
mean_iou_unet = np.nanmean(iou_scores)  # Use np.nanmean to handle NaN values
mean_dice_unet = np.mean(dice_scores)

print(f"Mean IoU: {mean_iou_unet}")
print(f"Mean Dice Coefiicient: {mean_dice_unet}")
Mean IoU: 0.5123432106595759
Mean Dice Coefiicient: 0.6281555891036987

Segnet¶

In [ ]:
iou_scores_segnet = []
dice_scores_segnet = []

for i in range(len(scan_test)):
    iou_value = iou(seg_net_pred[i], mask_test[i])
    dice = dice_coeff(seg_net_pred[i], mask_test[i])
    iou_scores_segnet.append(iou_value)
    dice_scores_segnet.append(dice)

mean_iou_segnet = np.nanmean(iou_scores_segnet)
mean_dice_segnet = np.mean(dice_scores_segnet)

print(f"Mean IoU for SegNet: {mean_iou_segnet}")
print(f"Mean Dice Coefiicient for SegNet: {mean_dice_segnet}")
Mean IoU for SegNet: 0.3687310088756075
Mean Dice Coefiicient for SegNet: 0.5049110651016235
In [ ]:
individual_results_df = pd.DataFrame({
    'Unet': [(mean_iou_unet + mean_dice_unet)/2],
    'Attention Unet':[ (mean_iou_att + mean_dice_att)/2],
    'ResUnet': [(mean_iou_resunet + mean_dice_resunet)/2],
    'Segnet': (mean_iou_segnet + mean_dice_segnet)/2,
    'DeepLab': (mean_iou_deep3 + mean_dice_deep3)/2
})
In [ ]:
individual_results_df
Out[ ]:
Unet Attention Unet ResUnet Segnet DeepLab
0 0.570249 0.694496 0.593103 0.436821 0.864959

Threshold Voting¶

In [ ]:
thresholds = [1.4,1.3,1.2,1.1,1.0,0.9,0.8,0.7,0.6]
mean_iou_DA_list = []
mean_dice_DA_list = []
mean_iou_DR_list = []
mean_dice_DR_list = []
mean_iou_DS_list = []
mean_dice_DS_list = []
mean_iou_DU_list = []
mean_dice_DU_list = []

for j in thresholds:
    ensemble_pred_DA = (deep3_pred + att_pred) >= j
    ensemble_pred_DR = (deep3_pred + res_unet_pred) >= j
    ensemble_pred_DS = (deep3_pred + seg_net_pred) >= j
    ensemble_pred_DU = (unet_pred + deep3_pred) >= j

    # Initialize lists for IoU and Dice Loss scores
    iou_scores_DA = []
    dice_coeff_DA = []
    iou_scores_DR = []
    dice_coeff_DR = []
    iou_scores_DS = []
    dice_coeff_DS = []
    iou_scores_DU = []
    dice_coeff_DU = []

    for i in range(len(scan_test)):

        # Calculate IoU and Dice Loss for U+A ensemble
        iou_value_DA = iou(ensemble_pred_DA[i], mask_test[i])
        dice_DA = dice_coeff(ensemble_pred_DA[i], mask_test[i])
        iou_scores_DA.append(iou_value_DA)
        dice_coeff_DA.append(dice_DA)

        # Calculate IoU and Dice Loss for U+R ensemble
        iou_value_DR = iou(ensemble_pred_DR[i], mask_test[i])
        dice_DR = dice_coeff(ensemble_pred_DR[i], mask_test[i])
        iou_scores_DR.append(iou_value_DR)
        dice_coeff_DR.append(dice_DR)

        # Calculate IoU and Dice Loss for U+S ensemble
        iou_value_DS = iou(ensemble_pred_DS[i], mask_test[i])
        dice_DS = dice_coeff(ensemble_pred_DS[i], mask_test[i])
        iou_scores_DS.append(iou_value_DS)
        dice_coeff_DS.append(dice_DS)

        iou_value_DU = iou(ensemble_pred_DU[i], mask_test[i])
        dice_DU = dice_coeff(ensemble_pred_DU[i], mask_test[i])
        iou_scores_DU.append(iou_value_DU)
        dice_coeff_DU.append(dice_DU)

    # Calculate the mean IoU and Dice coefficient scores for each ensemble

    mean_iou_DA = np.nanmean(iou_scores_DA)
    mean_dice_DA = np.mean(dice_coeff_DA)

    mean_iou_DR = np.nanmean(iou_scores_DR)
    mean_dice_DR = np.mean(dice_coeff_DR)

    mean_iou_DS = np.nanmean(iou_scores_DS)
    mean_dice_DS = np.mean(dice_coeff_DS)

    mean_iou_DU = np.nanmean(iou_scores_DU)
    mean_dice_DU = np.mean(dice_coeff_DU)

    # Append the results to the lists
    mean_iou_DA_list.append(mean_iou_DA)
    mean_dice_DA_list.append(mean_dice_DA)
    mean_iou_DR_list.append(mean_iou_DR)
    mean_dice_DR_list.append(mean_dice_DR)
    mean_iou_DS_list.append(mean_iou_DS)
    mean_dice_DS_list.append(mean_dice_DS)
    mean_iou_DU_list.append(mean_iou_DU)
    mean_dice_DU_list.append(mean_dice_DU)

# Create a DataFrame from the lists
Dresults_df = pd.DataFrame({
    'Threshold': thresholds,
    'Mean_IOU_D+A': mean_iou_DA_list,
    'Mean_Dice_Coeff_D+A': mean_dice_DA_list,
    'Mean_IOU_D+R': mean_iou_DR_list,
    'Mean_Dice_Coeff_D+R': mean_dice_DR_list,
    'Mean_IOU_D+S': mean_iou_DS_list,
    'Mean_Dice_Coeff_D+S': mean_dice_DS_list,
    'Mean_IOU_D+U': mean_iou_DU_list,
    'Mean_Dice_Coeff_D+U': mean_dice_DU_list,
})
In [ ]:
Dresults_df
Out[ ]:
Threshold Mean_IOU_D+A Mean_Dice_Coeff_D+A Mean_IOU_D+R Mean_Dice_Coeff_D+R Mean_IOU_D+S Mean_Dice_Coeff_D+S Mean_IOU_D+U Mean_Dice_Coeff_D+U
0 1.4 0.775039 0.850279 0.691177 0.768292 0.670528 0.752079 0.706092 0.793778
1 1.3 0.798494 0.868734 0.716607 0.790858 0.692126 0.768258 0.751527 0.829559
2 1.2 0.821519 0.889722 0.744378 0.816050 0.711938 0.783232 0.790735 0.863469
3 1.1 0.845371 0.910014 0.770727 0.840441 0.736049 0.804440 0.825507 0.891143
4 1.0 0.868123 0.926619 0.823759 0.893545 0.839417 0.908926 0.857303 0.916979
5 0.9 0.875757 0.931787 0.836072 0.904828 0.847617 0.914135 0.868637 0.924858
6 0.8 0.871759 0.929507 0.825280 0.895381 0.844272 0.911473 0.862384 0.921232
7 0.7 0.861289 0.922887 0.812624 0.884788 0.833653 0.903871 0.848868 0.912832
8 0.6 0.844784 0.912524 0.795150 0.872100 0.818295 0.892923 0.826015 0.897745
In [ ]:
thresholds = [1.4,1.3,1.2,1.1,1.0,0.9,0.8,0.7,0.6] # Replace with your desired threshold values
mean_iou_UA_list = []
mean_dice_UA_list = []
mean_iou_UR_list = []
mean_dice_UR_list = []
mean_iou_US_list = []
mean_dice_US_list = []

for j in thresholds:
    ensemble_pred_UA = (unet_pred + att_pred) >= j
    ensemble_pred_UR = (unet_pred + res_unet_pred) >= j
    ensemble_pred_US = (unet_pred + seg_net_pred) >= j

    # Initialize lists for IoU and Dice Loss scores
    iou_scores_UA = []
    dice_coeff_UA = []
    iou_scores_UR = []
    dice_coeff_UR = []
    iou_scores_US = []
    dice_coeff_US = []

    for i in range(len(scan_test)):

        # Calculate IoU and Dice Loss for U+A ensemble
        iou_value_UA = iou(ensemble_pred_UA[i], mask_test[i])
        dice_UA = dice_coeff(ensemble_pred_UA[i], mask_test[i])
        iou_scores_UA.append(iou_value_UA)
        dice_coeff_UA.append(dice_UA)

        # Calculate IoU and Dice Loss for U+R ensemble
        iou_value_UR = iou(ensemble_pred_UR[i], mask_test[i])
        dice_UR = dice_coeff(ensemble_pred_UR[i], mask_test[i])
        iou_scores_UR.append(iou_value_UR)
        dice_coeff_UR.append(dice_UR)

        # Calculate IoU and Dice Loss for U+S ensemble
        iou_value_US = iou(ensemble_pred_US[i], mask_test[i])
        dice_US = dice_coeff(ensemble_pred_US[i], mask_test[i])
        iou_scores_US.append(iou_value_US)
        dice_coeff_US.append(dice_US)

    # Calculate the mean IoU and Dice coefficient scores for each ensemble

    mean_iou_UA = np.nanmean(iou_scores_UA)
    mean_dice_UA = np.mean(dice_coeff_UA)

    mean_iou_UR = np.nanmean(iou_scores_UR)
    mean_dice_UR = np.mean(dice_coeff_UR)

    mean_iou_US = np.nanmean(iou_scores_US)
    mean_dice_US = np.mean(dice_coeff_US)

    # Append the results to the lists
    mean_iou_UA_list.append(mean_iou_UA)
    mean_dice_UA_list.append(mean_dice_UA)
    mean_iou_UR_list.append(mean_iou_UR)
    mean_dice_UR_list.append(mean_dice_UR)
    mean_iou_US_list.append(mean_iou_US)
    mean_dice_US_list.append(mean_dice_US)

# Create a DataFrame from the lists
Uresults_df = pd.DataFrame({
    'Threshold': thresholds,
    'Mean_IOU_U+A': mean_iou_UA_list,
    'Mean_Dice_Coeff_U+A': mean_dice_UA_list,
    'Mean_IOU_U+R': mean_iou_UR_list,
    'Mean_Dice_Coeff_U+R': mean_dice_UR_list,
    'Mean_IOU_U+S': mean_iou_US_list,
    'Mean_Dice_Coeff_U+S': mean_dice_US_list,
})
In [ ]:
Uresults_df
Out[ ]:
Threshold Mean_IOU_U+A Mean_Dice_Coeff_U+A Mean_IOU_U+R Mean_Dice_Coeff_U+R Mean_IOU_U+S Mean_Dice_Coeff_U+S
0 1.4 0.616100 0.721332 0.572106 0.674080 0.561234 0.664289
1 1.3 0.654747 0.754994 0.601574 0.698871 0.587510 0.687096
2 1.2 0.686497 0.781760 0.623115 0.716949 0.609176 0.705754
3 1.1 0.711642 0.802882 0.639085 0.731714 0.629206 0.723034
4 1.0 0.731955 0.819614 0.654143 0.745643 0.646247 0.737021
5 0.9 0.740393 0.827356 0.659940 0.751771 0.660480 0.750284
6 0.8 0.741326 0.830640 0.658252 0.751917 0.670884 0.762420
7 0.7 0.731590 0.824796 0.649212 0.746186 0.676324 0.770682
8 0.6 0.714419 0.813084 0.636593 0.739266 0.674466 0.772975
In [ ]:
# Define the threshold values
thresholds = [1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6]  # Replace with your desired threshold values

# Initialize lists to store results
mean_iou_AR_list = []
mean_dice_AR_list = []
mean_iou_AS_list = []
mean_dice_AS_list = []

# Loop through the threshold values
for j in thresholds:
    # Ensemble with att_pred
    ensemble_pred_AR = (att_pred + res_unet_pred) >= j
    ensemble_pred_AS = (att_pred + seg_net_pred) >= j

    # Initialize lists for IoU and Dice Loss scores
    iou_scores_AR = []
    dice_scores_AR = []
    iou_scores_AS = []
    dice_scores_AS = []

    for i in range(len(scan_test)):

        # Calculate IoU and Dice Loss for A+R ensemble
        iou_value_AR = iou(ensemble_pred_AR[i], mask_test[i])
        dice_AR = dice_coeff(ensemble_pred_AR[i], mask_test[i])
        iou_scores_AR.append(iou_value_AR)
        dice_scores_AR.append(dice_AR)

        # Calculate IoU and Dice Loss for A+S ensemble
        iou_value_AS = iou(ensemble_pred_AS[i], mask_test[i])
        dice_AS = dice_coeff(ensemble_pred_AS[i], mask_test[i])
        iou_scores_AS.append(iou_value_AS)
        dice_scores_AS.append(dice_AS)

    # Calculate the mean IoU and Dice coefficient scores for each ensemble
    mean_iou_AR = np.nanmean(iou_scores_AR)
    mean_dice_AR = np.mean(dice_scores_AR)

    mean_iou_AS = np.nanmean(iou_scores_AS)
    mean_dice_AS = np.mean(dice_scores_AS)

    # Append the results to the lists
    mean_iou_AR_list.append(mean_iou_AR)
    mean_dice_AR_list.append(mean_dice_AR)
    mean_iou_AS_list.append(mean_iou_AS)
    mean_dice_AS_list.append(mean_dice_AS)

# Create a DataFrame from the lists
Aresults_df = pd.DataFrame({
    'Threshold': thresholds,
    'Mean_IOU_A+R': mean_iou_AR_list,
    'Mean_Dice_Coeff_A+R': mean_dice_AR_list,
    'Mean_IOU_A+S': mean_iou_AS_list,
    'Mean_Dice_Coeff_A+S': mean_dice_AS_list,
})
In [ ]:
Aresults_df
Out[ ]:
Threshold Mean_IOU_A+R Mean_Dice_Coeff_A+R Mean_IOU_A+S Mean_Dice_Coeff_A+S
0 1.4 0.628290 0.721953 0.613824 0.707953
1 1.3 0.649870 0.740117 0.631492 0.721438
2 1.2 0.668680 0.756099 0.646194 0.732344
3 1.1 0.681842 0.767383 0.660401 0.744948
4 1.0 0.694921 0.780314 0.683348 0.767828
5 0.9 0.702418 0.789083 0.713512 0.799844
6 0.8 0.705652 0.795886 0.728877 0.817702
7 0.7 0.706485 0.799219 0.735523 0.826856
8 0.6 0.702148 0.798077 0.733246 0.826513
In [ ]:
# Define the threshold values
thresholds = [1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6]  # Replace with your desired threshold values

# Initialize lists to store results
mean_iou_RS_list = []
mean_dice_RS_list = []

# Loop through the threshold values
for j in thresholds:
    ensemble_pred_RS = (res_unet_pred + seg_net_pred) >= j

    # Initialize lists for IoU and Dice Loss scores
    iou_scores_RS = []
    dice_scores_RS = []

    for i in range(len(scan_test)):
        # Calculate IoU and Dice Loss for R+S ensemble
        iou_value_RS = iou(ensemble_pred_RS[i], mask_test[i])
        dice_RS = dice_coeff(ensemble_pred_RS[i], mask_test[i])
        iou_scores_RS.append(iou_value_RS)
        dice_scores_RS.append(dice_RS)

    # Calculate the mean IoU and Dice coefficient scores for each ensemble
    mean_iou_RS = np.nanmean(iou_scores_RS)
    mean_dice_RS = np.mean(dice_scores_RS)

    # Append the results to the lists

    mean_iou_RS_list.append(mean_iou_RS)
    mean_dice_RS_list.append(mean_dice_RS)

# Create a DataFrame from the lists
Rresults_df = pd.DataFrame({
    'Threshold': thresholds,
    'Mean_IOU_R+S': mean_iou_RS_list,
    'Mean_Dice_Coeff_R+S': mean_dice_RS_list,
})
In [ ]:
Rresults_df
Out[ ]:
Threshold Mean_IOU_R+S Mean_Dice_Coeff_R+S
0 1.4 0.575300 0.671648
1 1.3 0.595921 0.687965
2 1.2 0.612222 0.700644
3 1.1 0.627143 0.715241
4 1.0 0.647643 0.737992
5 0.9 0.660246 0.751313
6 0.8 0.662579 0.754877
7 0.7 0.660614 0.753986
8 0.6 0.651537 0.747940
In [ ]:
results_df = pd.DataFrame({
    'Threshold': Uresults_df['Threshold'],
    'IoU_Dice_Balance_D+A': (Dresults_df['Mean_IOU_D+A'] + (Dresults_df['Mean_Dice_Coeff_D+A'])) / 2,
    'IoU_Dice_Balance_D+R': (Dresults_df['Mean_IOU_D+R'] + (Dresults_df['Mean_Dice_Coeff_D+R'])) / 2,
    'IoU_Dice_Balance_D+S': (Dresults_df['Mean_IOU_D+S'] + (Dresults_df['Mean_Dice_Coeff_D+S'])) / 2,
    'IoU_Dice_Balance_D+U': (Dresults_df['Mean_IOU_D+U'] + (Dresults_df['Mean_Dice_Coeff_D+U'])) / 2,
    'IoU_Dice_Balance_U+A': (Uresults_df['Mean_IOU_U+A'] + (Uresults_df['Mean_Dice_Coeff_U+A'])) / 2,
    'IoU_Dice_Balance_U+R': (Uresults_df['Mean_IOU_U+R'] + (Uresults_df['Mean_Dice_Coeff_U+R'])) / 2,
    'IoU_Dice_Balance_U+S': (Uresults_df['Mean_IOU_U+S'] + (Uresults_df['Mean_Dice_Coeff_U+S'])) / 2,
    'IoU_Dice_Balance_A+R': (Aresults_df['Mean_IOU_A+R'] + (Aresults_df['Mean_Dice_Coeff_A+R'])) / 2,
    'IoU_Dice_Balance_A+S': (Aresults_df['Mean_IOU_A+S'] + (Aresults_df['Mean_Dice_Coeff_A+S'])) / 2,
    'IoU_Dice_Balance_R+S': (Rresults_df['Mean_IOU_R+S'] + (Rresults_df['Mean_Dice_Coeff_R+S'])) / 2
})
In [ ]:
results_df
Out[ ]:
Threshold IoU_Dice_Balance_D+A IoU_Dice_Balance_D+R IoU_Dice_Balance_D+S IoU_Dice_Balance_D+U IoU_Dice_Balance_U+A IoU_Dice_Balance_U+R IoU_Dice_Balance_U+S IoU_Dice_Balance_A+R IoU_Dice_Balance_A+S IoU_Dice_Balance_R+S
0 1.4 0.812659 0.729734 0.711304 0.749935 0.668716 0.623093 0.612762 0.675122 0.660889 0.623474
1 1.3 0.833614 0.753732 0.730192 0.790543 0.704870 0.650223 0.637303 0.694993 0.676465 0.641943
2 1.2 0.855621 0.780214 0.747585 0.827102 0.734128 0.670032 0.657465 0.712389 0.689269 0.656433
3 1.1 0.877693 0.805584 0.770245 0.858325 0.757262 0.685399 0.676120 0.724612 0.702674 0.671192
4 1.0 0.897371 0.858652 0.874172 0.887141 0.775785 0.699893 0.691634 0.737618 0.725588 0.692818
5 0.9 0.903772 0.870450 0.880876 0.896747 0.783874 0.705856 0.705382 0.745750 0.756678 0.705779
6 0.8 0.900633 0.860330 0.877873 0.891808 0.785983 0.705084 0.716652 0.750769 0.773289 0.708728
7 0.7 0.892088 0.848706 0.868762 0.880850 0.778193 0.697699 0.723503 0.752852 0.781189 0.707300
8 0.6 0.878654 0.833625 0.855609 0.861880 0.763752 0.687929 0.723720 0.750113 0.779880 0.699738
In [ ]:
import matplotlib.pyplot as plt

# Define the data
data = {
    'Threshold': results_df['Threshold'],
    'D+A': results_df['IoU_Dice_Balance_D+A'],
    'D+R': results_df['IoU_Dice_Balance_D+R'],
    'D+S': results_df['IoU_Dice_Balance_D+S'],
    'D+U': results_df['IoU_Dice_Balance_D+U'],
    'U+A': results_df['IoU_Dice_Balance_U+A'],
    'U+R': results_df['IoU_Dice_Balance_U+R'],
    'U+S': results_df['IoU_Dice_Balance_U+S'],
    'A+R': results_df['IoU_Dice_Balance_A+R'],
    'A+S': results_df['IoU_Dice_Balance_A+S'],
    'R+S': results_df['IoU_Dice_Balance_R+S'],

}

# Find the best values and corresponding thresholds for each IoU Dice balance
best_values = {}
for key in data.keys():
    if key != 'Threshold':
        best_idx = data[key].idxmax()
        best_values[key] = {'Threshold': data['Threshold'][best_idx], 'Value': data[key][best_idx]}
#        print(key,best_idx,best_values[key])
# Create a bar graph
plt.figure(figsize=(8, 6))

# Extract IoU Dice balance names and their best values
iou_dice_balances = best_values.keys()
best_iou_dice_values = [best_values[i]['Value'] for i in iou_dice_balances]
thresholds = [best_values[i]['Threshold'] for i in iou_dice_balances]

# Define colors for the bars
colors = ['red', 'blue', '#00fa00','#FF69B4',  'orange', 'cyan', '#BF40BF', 'yellow','#A95C68','#6495ED']
plt.bar(iou_dice_balances, best_iou_dice_values, color=colors)
plt.title("Best IoU Dice Balances at a Threshold")
plt.xlabel("Two model combinations")
plt.ylabel("IoU Dice Balances")

# Annotate thresholds above the bars
for i, value in enumerate(best_iou_dice_values):
    plt.text(i, value, f"{thresholds[i]:.2f}", ha='center', va='bottom')

plt.xticks(rotation= 0)

# Set y-axis limits from 0.8 to 1
plt.ylim(0.7, 0.91)
plt.yticks(np.arange(0.7, 0.91, 0.01))
# Add a kink from 0 to 0.8
plt.axhline(0.7, color='k')

plt.show()

After analyzing we can see we get best results after combining D+A at threshold 0.9

In [ ]:
thresholds = [1.7,1.6,1.5,1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6]
mean_iou_SAU_list = []
mean_dice_SAU_list = []
mean_iou_SAD_list = []
mean_dice_SAD_list = []
mean_iou_SAR_list = []
mean_dice_SAR_list = []
mean_iou_SUD_list = []
mean_dice_SUD_list = []
mean_iou_SUR_list = []
mean_dice_SUR_list = []
mean_iou_SDR_list = []
mean_dice_SDR_list = []
mean_iou_AUD_list = []
mean_dice_AUD_list = []
mean_iou_AUR_list = []
mean_dice_AUR_list = []
mean_iou_ADR_list = []
mean_dice_ADR_list = []
mean_iou_UDR_list = []
mean_dice_UDR_list = []

for j in thresholds:
    ensemble_pred_SAU = (unet_pred + att_pred + seg_net_pred) >= j
    ensemble_pred_SAD = (seg_net_pred + att_pred + deep3_pred) >= j
    ensemble_pred_SAR = (seg_net_pred + att_pred + res_unet_pred) >= j
    ensemble_pred_SUD = (seg_net_pred + unet_pred + deep3_pred) >= j
    ensemble_pred_SUR = (seg_net_pred + unet_pred + res_unet_pred) >= j
    ensemble_pred_SDR = (seg_net_pred + deep3_pred + res_unet_pred) >= j
    ensemble_pred_AUD = (unet_pred + att_pred + deep3_pred) >= j
    ensemble_pred_AUR = (unet_pred + att_pred + res_unet_pred) >= j
    ensemble_pred_ADR = (deep3_pred + att_pred + res_unet_pred) >= j
    ensemble_pred_UDR = (unet_pred + deep3_pred + res_unet_pred) >= j

    # Initialize lists for IoU and Dice Loss scores
    iou_scores_SAU = []
    dice_scores_SAU = []

    iou_scores_SAD = []
    dice_scores_SAD = []

    iou_scores_SAR = []
    dice_scores_SAR = []

    iou_scores_SUD = []
    dice_scores_SUD = []

    iou_scores_SUR = []
    dice_scores_SUR = []

    iou_scores_SDR = []
    dice_scores_SDR = []

    iou_scores_AUD = []
    dice_scores_AUD = []

    iou_scores_AUR = []
    dice_scores_AUR = []

    iou_scores_ADR = []
    dice_scores_ADR = []

    iou_scores_UDR = []
    dice_scores_UDR = []

    for i in range(len(scan_test)):
        iou_value_SAU = iou(ensemble_pred_SAU[i], mask_test[i])
        dice_SAU = dice_coeff(ensemble_pred_SAU[i], mask_test[i])
        iou_scores_SAU.append(iou_value_SAU)
        dice_scores_SAU.append(dice_SAU)

        iou_value_SAD = iou(ensemble_pred_SAD[i], mask_test[i])
        dice_SAD = dice_coeff(ensemble_pred_SAD[i], mask_test[i])
        iou_scores_SAD.append(iou_value_SAD)
        dice_scores_SAD.append(dice_SAD)

        iou_value_SAR = iou(ensemble_pred_SAR[i], mask_test[i])
        dice_SAR = dice_coeff(ensemble_pred_SAR[i], mask_test[i])
        iou_scores_SAR.append(iou_value_SAR)
        dice_scores_SAR.append(dice_SAR)

        iou_value_SUD = iou(ensemble_pred_SUD[i], mask_test[i])
        dice_SUD = dice_coeff(ensemble_pred_SUD[i], mask_test[i])
        iou_scores_SUD.append(iou_value_SUD)
        dice_scores_SUD.append(dice_SUD)

        iou_value_SUR = iou(ensemble_pred_SUR[i], mask_test[i])
        dice_SUR = dice_coeff(ensemble_pred_SUR[i], mask_test[i])
        iou_scores_SUR.append(iou_value_SUR)
        dice_scores_SUR.append(dice_SUR)

        iou_value_SDR = iou(ensemble_pred_SDR[i], mask_test[i])
        dice_SDR = dice_coeff(ensemble_pred_SDR[i], mask_test[i])
        iou_scores_SDR.append(iou_value_SDR)
        dice_scores_SDR.append(dice_SDR)

        iou_value_AUD = iou(ensemble_pred_AUD[i], mask_test[i])
        dice_AUD = dice_coeff(ensemble_pred_AUD[i], mask_test[i])
        iou_scores_AUD.append(iou_value_AUD)
        dice_scores_AUD.append(dice_AUD)

        iou_value_AUR = iou(ensemble_pred_AUR[i], mask_test[i])
        dice_AUR = dice_coeff(ensemble_pred_AUR[i], mask_test[i])
        iou_scores_AUR.append(iou_value_AUR)
        dice_scores_AUR.append(dice_AUR)

        iou_value_ADR = iou(ensemble_pred_ADR[i], mask_test[i])
        dice_ADR = dice_coeff(ensemble_pred_ADR[i], mask_test[i])
        iou_scores_ADR.append(iou_value_ADR)
        dice_scores_ADR.append(dice_ADR)

        iou_value_UDR = iou(ensemble_pred_UDR[i], mask_test[i])
        dice_UDR = dice_coeff(ensemble_pred_UDR[i], mask_test[i])
        iou_scores_UDR.append(iou_value_UDR)
        dice_scores_UDR.append(dice_UDR)

    # Calculate the mean IoU and Dice coefficient scores for each ensemble
    mean_iou_SAU = np.nanmean(iou_scores_SAU)
    mean_dice_SAU = np.mean(dice_scores_SAU)

    mean_iou_SAD = np.nanmean(iou_scores_SAD)
    mean_dice_SAD = np.mean(dice_scores_SAD)

    mean_iou_SAR = np.nanmean(iou_scores_SAR)
    mean_dice_SAR = np.mean(dice_scores_SAR)

    mean_iou_SUD = np.nanmean(iou_scores_SUD)
    mean_dice_SUD = np.mean(dice_scores_SUD)

    mean_iou_SUR = np.nanmean(iou_scores_SUR)
    mean_dice_SUR = np.mean(dice_scores_SUR)

    mean_iou_SDR = np.nanmean(iou_scores_SDR)
    mean_dice_SDR = np.mean(dice_scores_SDR)

    mean_iou_AUD = np.nanmean(iou_scores_AUD)
    mean_dice_AUD = np.mean(dice_scores_AUD)

    mean_iou_AUR = np.nanmean(iou_scores_AUR)
    mean_dice_AUR = np.mean(dice_scores_AUR)

    mean_iou_ADR = np.nanmean(iou_scores_ADR)
    mean_dice_ADR = np.mean(dice_scores_ADR)

    mean_iou_UDR = np.nanmean(iou_scores_UDR)
    mean_dice_UDR = np.mean(dice_scores_UDR)

    # Append the results to the lists
    mean_iou_SAU_list.append(mean_iou_SAU)
    mean_dice_SAU_list.append(mean_dice_SAU)
    mean_iou_SAD_list.append(mean_iou_SAD)
    mean_dice_SAD_list.append(mean_dice_SAD)
    mean_iou_SAR_list.append(mean_iou_SAR)
    mean_dice_SAR_list.append(mean_dice_SAR)
    mean_iou_SUR_list.append(mean_iou_SUR)
    mean_dice_SUR_list.append(mean_dice_SUR)
    mean_iou_SUD_list.append(mean_iou_SUD)
    mean_dice_SUD_list.append(mean_dice_SUD)
    mean_iou_SDR_list.append(mean_iou_SDR)
    mean_dice_SDR_list.append(mean_dice_SDR)
    mean_iou_AUD_list.append(mean_iou_AUD)
    mean_dice_AUD_list.append(mean_dice_AUD)
    mean_iou_AUR_list.append(mean_iou_AUR)
    mean_dice_AUR_list.append(mean_dice_AUR)
    mean_iou_ADR_list.append(mean_iou_ADR)
    mean_dice_ADR_list.append(mean_dice_ADR)
    mean_iou_UDR_list.append(mean_iou_UDR)
    mean_dice_UDR_list.append(mean_dice_UDR)

# Create a DataFrame from the lists
three_results_df = pd.DataFrame({
    'Threshold': thresholds,
    'Mean_IOU_S+A+U': mean_iou_SAU_list,
    'Mean_Dice_Coeff_S+A+U': mean_dice_SAU_list,
    'Mean_IOU_S+A+D': mean_iou_SAD_list,
    'Mean_Dice_Coeff_S+A+D': mean_dice_SAD_list,
    'Mean_IOU_S+A+R': mean_iou_SAR_list,
    'Mean_Dice_Coeff_S+A+R': mean_dice_SAR_list,
    'Mean_IOU_S+U+R': mean_iou_SUR_list,
    'Mean_Dice_Coeff_S+U+R': mean_dice_SUR_list,
    'Mean_IOU_S+U+D': mean_iou_SUD_list,
    'Mean_Dice_Coeff_S+U+D': mean_dice_SUD_list,
    'Mean_IOU_S+D+R': mean_iou_SDR_list,
    'Mean_Dice_Coeff_S+D+R': mean_dice_SDR_list,
    'Mean_IOU_A+U+D': mean_iou_AUD_list,
    'Mean_Dice_Coeff_A+U+D': mean_dice_AUD_list,
    'Mean_IOU_A+U+R': mean_iou_AUR_list,
    'Mean_Dice_Coeff_A+U+R': mean_dice_AUR_list,
    'Mean_IOU_A+D+R': mean_iou_ADR_list,
    'Mean_Dice_Coeff_A+D+R': mean_dice_ADR_list,
    'Mean_IOU_U+D+R': mean_iou_UDR_list,
    'Mean_Dice_Coeff_U+D+R': mean_dice_UDR_list,
})
In [ ]:
three_results_df
Out[ ]:
Threshold Mean_IOU_S+A+U Mean_Dice_Coeff_S+A+U Mean_IOU_S+A+D Mean_Dice_Coeff_S+A+D Mean_IOU_S+A+R Mean_Dice_Coeff_S+A+R Mean_IOU_S+U+R Mean_Dice_Coeff_S+U+R Mean_IOU_S+U+D ... Mean_IOU_S+D+R Mean_Dice_Coeff_S+D+R Mean_IOU_A+U+D Mean_Dice_Coeff_A+U+D Mean_IOU_A+U+R Mean_Dice_Coeff_A+U+R Mean_IOU_A+D+R Mean_Dice_Coeff_A+D+R Mean_IOU_U+D+R Mean_Dice_Coeff_U+D+R
0 1.7 0.661710 0.749985 0.778422 0.858079 0.666831 0.750094 0.638095 0.728564 0.722848 ... 0.721628 0.802441 0.791756 0.863250 0.688140 0.775443 0.769071 0.844916 0.739364 0.812609
1 1.6 0.679109 0.765653 0.798465 0.874360 0.677886 0.761232 0.655574 0.744824 0.748972 ... 0.735570 0.814238 0.806259 0.874958 0.697814 0.783728 0.785026 0.859718 0.754174 0.827013
2 1.5 0.695555 0.781963 0.813087 0.885523 0.686213 0.769641 0.669127 0.756775 0.771823 ... 0.748130 0.824452 0.820523 0.888751 0.705778 0.790782 0.799414 0.872537 0.766575 0.840540
3 1.4 0.706607 0.793058 0.823143 0.893319 0.692397 0.776074 0.675534 0.764191 0.786916 ... 0.758554 0.833154 0.833135 0.900903 0.711287 0.796094 0.807500 0.881049 0.778390 0.852270
4 1.3 0.714557 0.802175 0.832788 0.900831 0.699684 0.784695 0.680649 0.770774 0.797130 ... 0.768252 0.841843 0.844247 0.911667 0.714852 0.800493 0.815246 0.888714 0.789454 0.864320
5 1.2 0.724651 0.812834 0.840004 0.907942 0.705660 0.791980 0.683028 0.773938 0.805279 ... 0.773421 0.847689 0.847977 0.914990 0.714933 0.802638 0.824893 0.897944 0.794711 0.871444
6 1.1 0.729569 0.819683 0.844118 0.911905 0.710546 0.798151 0.683241 0.775171 0.816106 ... 0.780651 0.856518 0.845705 0.913642 0.712329 0.802556 0.828994 0.902311 0.799235 0.877608
7 1.0 0.729097 0.820583 0.841292 0.909993 0.711163 0.800289 0.680708 0.774550 0.823396 ... 0.810518 0.888812 0.839187 0.909248 0.708393 0.801790 0.827751 0.900931 0.805777 0.885577
8 0.9 0.723984 0.818509 0.828039 0.900593 0.710560 0.803132 0.673055 0.769825 0.814187 ... 0.798870 0.879671 0.825051 0.899971 0.699612 0.796775 0.816109 0.891818 0.795091 0.877458
9 0.8 0.715307 0.814425 0.814313 0.891300 0.703809 0.801117 0.658930 0.759712 0.798318 ... 0.781461 0.865721 0.800841 0.883188 0.687787 0.789551 0.797886 0.878029 0.774209 0.860915
10 0.7 0.699824 0.803817 0.794352 0.877544 0.692171 0.793701 0.640052 0.745474 0.775329 ... 0.760965 0.849498 0.774401 0.863437 0.669561 0.776026 0.779251 0.863522 0.746922 0.839028
11 0.6 0.673588 0.784743 0.770884 0.861040 0.675373 0.781463 0.616353 0.727853 0.744937 ... 0.736037 0.830458 0.744944 0.841545 0.647649 0.759769 0.754524 0.845654 0.715539 0.814896

12 rows × 21 columns

In [ ]:
results2_df = pd.DataFrame({
    'Threshold': three_results_df['Threshold'],
    'IoU_Dice_Balance_S+A+U': (three_results_df['Mean_IOU_S+A+U'] + (three_results_df['Mean_Dice_Coeff_S+A+U'])) / 2,
    'IoU_Dice_Balance_S+A+D': (three_results_df['Mean_IOU_S+A+D'] + (three_results_df['Mean_Dice_Coeff_S+A+D'])) / 2,
    'IoU_Dice_Balance_S+A+R': (three_results_df['Mean_IOU_S+A+R'] + (three_results_df['Mean_Dice_Coeff_S+A+R'])) / 2,
    'IoU_Dice_Balance_S+U+D': (three_results_df['Mean_IOU_S+U+D'] + (three_results_df['Mean_Dice_Coeff_S+U+D'])) / 2,
    'IoU_Dice_Balance_S+U+R': (three_results_df['Mean_IOU_S+U+R'] + (three_results_df['Mean_Dice_Coeff_S+U+R'])) / 2,
    'IoU_Dice_Balance_S+D+R': (three_results_df['Mean_IOU_S+D+R'] + (three_results_df['Mean_Dice_Coeff_S+D+R'])) / 2,
    'IoU_Dice_Balance_A+U+D': (three_results_df['Mean_IOU_A+U+D'] + (three_results_df['Mean_Dice_Coeff_A+U+D'])) / 2,
    'IoU_Dice_Balance_A+U+R': (three_results_df['Mean_IOU_A+U+R'] + (three_results_df['Mean_Dice_Coeff_A+U+R'])) / 2,
    'IoU_Dice_Balance_A+D+R': (three_results_df['Mean_IOU_A+D+R'] + (three_results_df['Mean_Dice_Coeff_A+D+R'])) / 2,
    'IoU_Dice_Balance_U+D+R': (three_results_df['Mean_IOU_U+D+R'] + (three_results_df['Mean_Dice_Coeff_U+D+R'])) / 2,
})
In [ ]:
results2_df
Out[ ]:
Threshold IoU_Dice_Balance_S+A+U IoU_Dice_Balance_S+A+D IoU_Dice_Balance_S+A+R IoU_Dice_Balance_S+U+D IoU_Dice_Balance_S+U+R IoU_Dice_Balance_S+D+R IoU_Dice_Balance_A+U+D IoU_Dice_Balance_A+U+R IoU_Dice_Balance_A+D+R IoU_Dice_Balance_U+D+R
0 1.7 0.705848 0.818251 0.708462 0.762919 0.683329 0.762035 0.827503 0.731792 0.806993 0.775987
1 1.6 0.722381 0.836412 0.719559 0.788678 0.700199 0.774904 0.840608 0.740771 0.822372 0.790593
2 1.5 0.738759 0.849305 0.727927 0.810313 0.712951 0.786291 0.854637 0.748280 0.835976 0.803558
3 1.4 0.749832 0.858231 0.734235 0.824274 0.719862 0.795854 0.867019 0.753690 0.844274 0.815330
4 1.3 0.758366 0.866809 0.742190 0.834522 0.725712 0.805048 0.877957 0.757673 0.851980 0.826887
5 1.2 0.768742 0.873973 0.748820 0.842598 0.728483 0.810555 0.881483 0.758786 0.861419 0.833078
6 1.1 0.774626 0.878011 0.754348 0.852462 0.729206 0.818585 0.879673 0.757442 0.865653 0.838422
7 1.0 0.774840 0.875642 0.755726 0.860352 0.727629 0.849665 0.874217 0.755091 0.864341 0.845677
8 0.9 0.771247 0.864316 0.756846 0.852211 0.721440 0.839271 0.862511 0.748194 0.853963 0.836275
9 0.8 0.764866 0.852806 0.752463 0.838618 0.709321 0.823591 0.842015 0.738669 0.837957 0.817562
10 0.7 0.751821 0.835948 0.742936 0.818813 0.692763 0.805232 0.818919 0.722794 0.821387 0.792975
11 0.6 0.729166 0.815962 0.728418 0.792605 0.672103 0.783248 0.793244 0.703709 0.800089 0.765218
In [ ]:
import matplotlib.pyplot as plt

# Define the new data
data = {
    'Threshold': [1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6],
    'S+A+U': results2_df['IoU_Dice_Balance_S+A+U'],
    'S+A+D': results2_df['IoU_Dice_Balance_S+A+D'],
    'S+A+R': results2_df['IoU_Dice_Balance_S+A+R'],
    'S+U+D': results2_df['IoU_Dice_Balance_S+U+D'],
    'S+U+R': results2_df['IoU_Dice_Balance_S+U+R'],
    'S+D+R': results2_df['IoU_Dice_Balance_S+D+R'],
    'A+U+D': results2_df['IoU_Dice_Balance_A+U+D'],
    'A+U+R': results2_df['IoU_Dice_Balance_A+U+R'],
    'A+D+R': results2_df['IoU_Dice_Balance_A+D+R'],
    'U+D+R': results2_df['IoU_Dice_Balance_U+D+R'],
}

# Find the best values and corresponding thresholds for each IoU Dice balance
best_values = {}
for key in data.keys():
    if key != 'Threshold':
        best_idx = data[key].idxmax()
        best_values[key] = {'Threshold': data['Threshold'][best_idx], 'Value': data[key][best_idx]}

# Create a bar graph
plt.figure(figsize=(8, 6))

# Extract IoU Dice balance names and their best values
iou_dice_balances = best_values.keys()
best_iou_dice_values = [best_values[i]['Value'] for i in iou_dice_balances]
thresholds = [best_values[i]['Threshold'] for i in iou_dice_balances]

# Define colors for the bars
colors = ['red', 'blue', '#00fa00','#FF69B4',  'orange', 'cyan', '#BF40BF', 'yellow','#A95C68','#6495ED']

plt.bar(iou_dice_balances, best_iou_dice_values, color=colors)
plt.title("Best IoU Dice Balances at a Threshold")
plt.xlabel("Three model combinations")
plt.ylabel("IoU Dice Balances")

# Annotate thresholds above the bars
for i, value in enumerate(best_iou_dice_values):
    plt.text(i, value, f"{thresholds[i]:.2f}", ha='center', va='bottom')

plt.xticks(rotation= 0)

# Set y-axis limits from 0.8 to 1
plt.ylim(0.7, 0.9)
plt.yticks(np.arange(0.7, 0.9, 0.01))
# Add a kink from 0 to 0.8
plt.axhline(0., color='k')

plt.show()

Best combination is: S+A+D at threshold 1.3

In [ ]:
thresholds = [2.2,2.1,2.0,1.9,1.8,1.7,1.6,1.5,1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6]
mean_iou_SAUD_list = []
mean_dice_SAUD_list = []
mean_iou_SAUR_list = []
mean_dice_SAUR_list = []
mean_iou_SUDR_list = []
mean_dice_SUDR_list = []
mean_iou_SADR_list = []
mean_dice_SADR_list = []
mean_iou_AUDR_list = []
mean_dice_AUDR_list = []

for j in thresholds:
    ensemble_pred_SAUD = (unet_pred + att_pred + seg_net_pred + deep3_pred) >= j
    ensemble_pred_SAUR = (seg_net_pred + att_pred + res_unet_pred + unet_pred) >= j
    ensemble_pred_SUDR = (seg_net_pred + unet_pred + deep3_pred +res_unet_pred) >= j
    ensemble_pred_SADR = (seg_net_pred + deep3_pred + res_unet_pred +att_pred) >= j
    ensemble_pred_AUDR = (unet_pred + att_pred + deep3_pred + res_unet_pred) >= j

    # Initialize lists for IoU and Dice Loss scores
    iou_scores_SAUD = []
    dice_scores_SAUD = []

    iou_scores_SAUR = []
    dice_scores_SAUR = []

    iou_scores_SUDR = []
    dice_scores_SUDR = []

    iou_scores_SADR = []
    dice_scores_SADR = []

    iou_scores_AUDR = []
    dice_scores_AUDR = []

    for i in range(len(scan_test)):
        iou_value_SAUD = iou(ensemble_pred_SAUD[i], mask_test[i])
        dice_SAUD = dice_coeff(ensemble_pred_SAUD[i], mask_test[i])
        iou_scores_SAUD.append(iou_value_SAUD)
        dice_scores_SAUD.append(dice_SAUD)

        iou_value_SAUR = iou(ensemble_pred_SAUR[i], mask_test[i])
        dice_SAUR = dice_coeff(ensemble_pred_SAUR[i], mask_test[i])
        iou_scores_SAUR.append(iou_value_SAUR)
        dice_scores_SAUR.append(dice_SAUR)

        iou_value_SUDR = iou(ensemble_pred_SUDR[i], mask_test[i])
        dice_SUDR = dice_coeff(ensemble_pred_SUDR[i], mask_test[i])
        iou_scores_SUDR.append(iou_value_SUDR)
        dice_scores_SUDR.append(dice_SUDR)

        iou_value_SADR = iou(ensemble_pred_SADR[i], mask_test[i])
        dice_SADR = dice_coeff(ensemble_pred_SADR[i], mask_test[i])
        iou_scores_SADR.append(iou_value_SADR)
        dice_scores_SADR.append(dice_SADR)

        iou_value_AUDR = iou(ensemble_pred_AUDR[i], mask_test[i])
        dice_AUDR = dice_coeff(ensemble_pred_AUDR[i], mask_test[i])
        iou_scores_AUDR.append(iou_value_AUDR)
        dice_scores_AUDR.append(dice_AUDR)

    # Calculate the mean IoU and Dice coefficient scores for each ensemble
    mean_iou_SAUD = np.nanmean(iou_scores_SAUD)
    mean_dice_SAUD = np.mean(dice_scores_SAUD)

    mean_iou_SAUR = np.nanmean(iou_scores_SAUR)
    mean_dice_SAUR = np.mean(dice_scores_SAUR)

    mean_iou_SUDR = np.nanmean(iou_scores_SUDR)
    mean_dice_SUDR = np.mean(dice_scores_SUDR)

    mean_iou_SADR = np.nanmean(iou_scores_SADR)
    mean_dice_SADR = np.mean(dice_scores_SADR)

    mean_iou_AUDR = np.nanmean(iou_scores_AUDR)
    mean_dice_AUDR = np.mean(dice_scores_AUDR)

    # Append the results to the lists
    mean_iou_SAUD_list.append(mean_iou_SAUD)
    mean_dice_SAUD_list.append(mean_dice_SAUD)
    mean_iou_SAUR_list.append(mean_iou_SAUR)
    mean_dice_SAUR_list.append(mean_dice_SAUR)
    mean_iou_SUDR_list.append(mean_iou_SUDR)
    mean_dice_SUDR_list.append(mean_dice_SUDR)
    mean_iou_SADR_list.append(mean_iou_SADR)
    mean_dice_SADR_list.append(mean_dice_SADR)
    mean_iou_AUDR_list.append(mean_iou_AUDR)
    mean_dice_AUDR_list.append(mean_dice_AUDR)

# Create a DataFrame from the lists
four_results_df = pd.DataFrame({
    'Threshold': thresholds,
    'Mean_IOU_S+A+U+D': mean_iou_SAUD_list,
    'Mean_Dice_Coeff_S+A+U+D': mean_dice_SAUD_list,
    'Mean_IOU_S+A+U+R': mean_iou_SAUR_list,
    'Mean_Dice_Coeff_S+A+U+R': mean_dice_SAUR_list,
    'Mean_IOU_S+U+D+R': mean_iou_SUDR_list,
    'Mean_Dice_Coeff_S+U+D+R': mean_dice_SUDR_list,
    'Mean_IOU_S+A+D+R': mean_iou_SADR_list,
    'Mean_Dice_Coeff_S+A+D+R': mean_dice_SADR_list,
    'Mean_IOU_A+U+D+R': mean_iou_AUDR_list,
    'Mean_Dice_Coeff_A+U+D+R': mean_dice_AUDR_list,
})
In [ ]:
four_results_df
Out[ ]:
Threshold Mean_IOU_S+A+U+D Mean_Dice_Coeff_S+A+U+D Mean_IOU_S+A+U+R Mean_Dice_Coeff_S+A+U+R Mean_IOU_S+U+D+R Mean_Dice_Coeff_S+U+D+R Mean_IOU_S+A+D+R Mean_Dice_Coeff_S+A+D+R Mean_IOU_A+U+D+R Mean_Dice_Coeff_A+U+D+R
0 2.2 0.753014 0.833605 0.674162 0.759498 0.716500 0.798774 0.737032 0.814629 0.753080 0.827637
1 2.1 0.770544 0.849564 0.685124 0.770728 0.729865 0.810614 0.749777 0.827000 0.763581 0.837356
2 2.0 0.786387 0.862539 0.693671 0.778906 0.743769 0.822867 0.763264 0.839830 0.773637 0.847199
3 1.9 0.798552 0.872657 0.703161 0.787586 0.756807 0.833748 0.775679 0.853136 0.782795 0.855989
4 1.8 0.807551 0.881407 0.708899 0.793049 0.765697 0.841977 0.788075 0.865884 0.792966 0.866873
5 1.7 0.814012 0.887120 0.714307 0.798242 0.772941 0.848474 0.797248 0.874751 0.797647 0.873276
6 1.6 0.818754 0.891390 0.716226 0.801005 0.775820 0.852461 0.803355 0.880154 0.800784 0.877684
7 1.5 0.820429 0.893483 0.717028 0.804294 0.775860 0.853869 0.806966 0.883345 0.803628 0.881689
8 1.4 0.821051 0.895339 0.714758 0.804428 0.773476 0.853539 0.806918 0.883248 0.805429 0.884805
9 1.3 0.817089 0.893652 0.712984 0.804806 0.771722 0.854608 0.805583 0.882709 0.804221 0.884934
10 1.2 0.811997 0.890462 0.707794 0.802268 0.768046 0.853815 0.804460 0.883657 0.799460 0.881874
11 1.1 0.802770 0.883792 0.700905 0.798253 0.764344 0.852726 0.799178 0.880735 0.789255 0.874420
12 1.0 0.786690 0.872345 0.690660 0.791538 0.761711 0.852978 0.788337 0.872157 0.776302 0.864885
13 0.9 0.769305 0.859501 0.678524 0.783282 0.745199 0.839758 0.771716 0.859619 0.756105 0.849768
14 0.8 0.747257 0.843091 0.662093 0.771992 0.722969 0.821742 0.751240 0.843984 0.731077 0.829939
15 0.7 0.721967 0.823959 0.640802 0.755855 0.695255 0.799774 0.727031 0.825391 0.702234 0.806433
16 0.6 0.689981 0.799810 0.613241 0.733864 0.663682 0.774801 0.699048 0.804117 0.671816 0.782337
In [ ]:
results4_df = pd.DataFrame({
    'Threshold': four_results_df['Threshold'],
    'IoU_Dice_Balance_S+A+U+D': (four_results_df['Mean_IOU_S+A+U+D'] + (four_results_df['Mean_Dice_Coeff_S+A+U+D'])) / 2,
    'IoU_Dice_Balance_S+A+U+R': (four_results_df['Mean_IOU_S+A+U+R'] + (four_results_df['Mean_Dice_Coeff_S+A+U+R'])) / 2,
    'IoU_Dice_Balance_S+U+D+R': (four_results_df['Mean_IOU_S+U+D+R'] + (four_results_df['Mean_Dice_Coeff_S+U+D+R'])) / 2,
    'IoU_Dice_Balance_S+A+D+R': (four_results_df['Mean_IOU_S+A+D+R'] + (four_results_df['Mean_Dice_Coeff_S+A+D+R'])) / 2,
    'IoU_Dice_Balance_A+U+D+R': (four_results_df['Mean_IOU_A+U+D+R'] + (four_results_df['Mean_Dice_Coeff_A+U+D+R'])) / 2,

})
In [ ]:
results4_df
Out[ ]:
Threshold IoU_Dice_Balance_S+A+U+D IoU_Dice_Balance_S+A+U+R IoU_Dice_Balance_S+U+D+R IoU_Dice_Balance_S+A+D+R IoU_Dice_Balance_A+U+D+R
0 2.2 0.793309 0.716830 0.757637 0.775830 0.790358
1 2.1 0.810054 0.727926 0.770240 0.788389 0.800469
2 2.0 0.824463 0.736289 0.783318 0.801547 0.810418
3 1.9 0.835605 0.745374 0.795277 0.814407 0.819392
4 1.8 0.844479 0.750974 0.803837 0.826980 0.829919
5 1.7 0.850566 0.756275 0.810708 0.835999 0.835461
6 1.6 0.855072 0.758615 0.814140 0.841755 0.839234
7 1.5 0.856956 0.760661 0.814864 0.845155 0.842658
8 1.4 0.858195 0.759593 0.813508 0.845083 0.845117
9 1.3 0.855370 0.758895 0.813165 0.844146 0.844577
10 1.2 0.851230 0.755031 0.810931 0.844059 0.840667
11 1.1 0.843281 0.749579 0.808535 0.839956 0.831838
12 1.0 0.829517 0.741099 0.807345 0.830247 0.820593
13 0.9 0.814403 0.730903 0.792479 0.815668 0.802937
14 0.8 0.795174 0.717043 0.772355 0.797612 0.780508
15 0.7 0.772963 0.698328 0.747515 0.776211 0.754333
16 0.6 0.744895 0.673552 0.719241 0.751583 0.727077
In [ ]:
import matplotlib.pyplot as plt
import numpy as np

# Define the new data
data = {
    'Threshold': [2.2, 2.1, 2.0, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6],
    'S+A+U+D': results4_df['IoU_Dice_Balance_S+A+U+D'],
    'S+A+U+R': results4_df['IoU_Dice_Balance_S+A+U+R'],
    'S+U+D+R': results4_df['IoU_Dice_Balance_S+A+D+R'],
    'S+A+D+R':results4_df['IoU_Dice_Balance_S+A+D+R'],
    'A+U+D+R': results4_df['IoU_Dice_Balance_A+U+D+R'],
}

# Find the best values and corresponding thresholds for each IoU Dice balance
best_values = {}
for key in data.keys():
    if key != 'Threshold':
        best_idx = data[key].idxmax()
        best_values[key] = {'Threshold': data['Threshold'][best_idx], 'Value': data[key][best_idx]}

# Create a bar graph
plt.figure(figsize=(6, 6))

# Extract IoU Dice balance names and their best values
iou_dice_balances = best_values.keys()
best_iou_dice_values = [best_values[i]['Value'] for i in iou_dice_balances]
thresholds = [best_values[i]['Threshold'] for i in iou_dice_balances]

# Define colors for the bars
colors = ['red', 'blue', '#00fa00','#FF69B4',  'orange']

plt.bar(iou_dice_balances, best_iou_dice_values, color=colors)
plt.title("Best IoU Dice Balances at a Threshold")
plt.xlabel("Four model combinations")
plt.ylabel("IoU Dice Balances")

# Annotate thresholds above the bars
for i, value in enumerate(best_iou_dice_values):
    plt.text(i, value, f"{thresholds[i]:.2f}", ha='center', va='bottom')

plt.xticks(rotation=0)

# Set y-axis limits from 0.7 to 0.9
plt.ylim(0.7, 0.9)
plt.yticks(np.arange(0.7, 0.9, 0.01))

# Add a kink from 0.7 to 0.9
plt.axhline(0.7, color='k')

plt.show()
In [ ]:
hresholds = [2.2,2.1,2.0,1.9,1.8,1.7,1.6,1.5,1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6]
mean_iou_SAUDR_list = []
mean_dice_SAUDR_list = []
IoU_Dice_Balance_SAUDR_list = []

for j in thresholds:
    ensemble_pred_SAUDR = (unet_pred + att_pred + seg_net_pred + res_unet_pred +deep3_pred) >= j
    iou_scores_SAUDR = []
    dice_scores_SAUDR = []

    for i in range(len(scan_test)):
        iou_value_SAUDR = iou(ensemble_pred_SAUDR[i], mask_test[i])
        dice_SAUDR = dice_coeff(ensemble_pred_SAUDR[i], mask_test[i])
        iou_scores_SAUDR.append(iou_value_SAUDR)
        dice_scores_SAUDR.append(dice_SAUDR)

    mean_iou_SAUDR = np.nanmean(iou_scores_SAUDR)
    mean_dice_SAUDR = np.mean(dice_scores_SAUDR)

    # Calculate the IoU-Dice balance for each ensemble and threshold
    IoU_Dice_Balance_SAUDR = (mean_iou_SAUDR + (mean_dice_SAUDR)) / 2

    # Append the results to the respective lists
    IoU_Dice_Balance_SAUDR_list.append(IoU_Dice_Balance_SAUDR)

    # Append the results to the lists
    mean_iou_SAUDR_list.append(mean_iou_SAUDR)
    mean_dice_SAUDR_list.append(mean_dice_SAUDR)

# Create a DataFrame from the lists
SAUDR_results_df = pd.DataFrame({
    'Threshold': thresholds,
    'Mean_IOU_S+A+U+D+R': mean_iou_SAUDR_list,
    'Mean_Dice_Coeff_S+A+U+D+R': mean_dice_SAUDR_list,
    'IoU_Dice_Balance_S+A+U+D+R': IoU_Dice_Balance_SAUDR_list
})
In [ ]:

In [ ]:
SAUDR_results_df
Out[ ]:
Threshold Mean_IOU_S+A+U+D+R Mean_Dice_Coeff_S+A+U+D+R IoU_Dice_Balance_S+A+U+D+R
0 2.2 0.776605 0.853253 0.814929
1 2.1 0.782241 0.859242 0.820742
2 2.0 0.787372 0.864517 0.825945
3 1.9 0.790419 0.868137 0.829278
4 1.8 0.792288 0.871740 0.832014
5 1.7 0.792786 0.873233 0.833009
6 1.6 0.790806 0.872796 0.831801
7 1.5 0.787579 0.871010 0.829295
8 1.4 0.782505 0.868455 0.825480
9 1.3 0.775363 0.863830 0.819597
10 1.2 0.765758 0.856938 0.811348
11 1.1 0.751045 0.845725 0.798385
12 1.0 0.733611 0.832368 0.782990
13 0.9 0.714157 0.816949 0.765553
14 0.8 0.689845 0.797398 0.743621
15 0.7 0.661419 0.774602 0.718011
16 0.6 0.629901 0.749019 0.689460

Best Result is at threshold 2.1

In [ ]:
import matplotlib.pyplot as plt

# Define the data
data1 = {
    'Threshold': results_df['Threshold'],
    'D+A': results_df['IoU_Dice_Balance_D+A'],
    'D+R': results_df['IoU_Dice_Balance_D+R'],
    'D+S': results_df['IoU_Dice_Balance_D+S'],
    'D+U': results_df['IoU_Dice_Balance_D+U'],
    'U+A': results_df['IoU_Dice_Balance_U+A'],
    'U+R': results_df['IoU_Dice_Balance_U+R'],
    'U+S': results_df['IoU_Dice_Balance_U+S'],
    'A+R': results_df['IoU_Dice_Balance_A+R'],
    'A+S': results_df['IoU_Dice_Balance_A+S'],
    'R+S': results_df['IoU_Dice_Balance_R+S']
}
data2 = {
      'Threshold': results2_df['Threshold'],
        'S+A+U': results2_df['IoU_Dice_Balance_S+A+U'],
      'S+A+D': results2_df['IoU_Dice_Balance_S+A+D'],
      'S+A+R': results2_df['IoU_Dice_Balance_S+A+R'],
      'S+U+D': results2_df['IoU_Dice_Balance_S+U+D'],
      'S+U+R': results2_df['IoU_Dice_Balance_S+U+R'],
      'S+D+R': results2_df['IoU_Dice_Balance_S+D+R'],
      'A+U+D': results2_df['IoU_Dice_Balance_A+U+D'],
      'A+U+R': results2_df['IoU_Dice_Balance_A+U+R'],
      'A+D+R': results2_df['IoU_Dice_Balance_A+D+R'],
      'U+D+R': results2_df['IoU_Dice_Balance_U+D+R']
}
data3 = {
      'Threshold': results4_df['Threshold'],
      'S+A+U+D': results4_df['IoU_Dice_Balance_S+A+U+D'],
    'S+A+U+R': results4_df['IoU_Dice_Balance_S+A+U+R'],
    'S+U+D+R': results4_df['IoU_Dice_Balance_S+A+D+R'],
    'S+A+D+R':results4_df['IoU_Dice_Balance_S+A+D+R'],
    'A+U+D+R': results4_df['IoU_Dice_Balance_A+U+D+R'],
    'S+A+U+D+R':SAUDR_results_df['IoU_Dice_Balance_S+A+U+D+R']

}

# Find the best values and corresponding thresholds for each IoU Dice balance
best_values = {}
for key in data1.keys():
    if key != 'Threshold':
        best_idx = data1[key].idxmax()
        best_values[key] = {'Threshold': data1['Threshold'][best_idx], 'Value': data1[key][best_idx]}
for key in data2.keys():
    if key != 'Threshold':
        best_idx = data2[key].idxmax()
        best_values[key] = {'Threshold': data2['Threshold'][best_idx], 'Value': data2[key][best_idx]}
for key in data3.keys():
    if key != 'Threshold':
        best_idx = data3[key].idxmax()
        best_values[key] = {'Threshold': data3['Threshold'][best_idx], 'Value': data3[key][best_idx]}

plt.figure(figsize=(15, 6))

# Extract IoU Dice balance names and their best values
iou_dice_balances = best_values.keys()
best_iou_dice_values = [best_values[i]['Value'] for i in iou_dice_balances]
thresholds = [best_values[i]['Threshold'] for i in iou_dice_balances]

# Define colors for the bars
colors = ['red', 'blue', '#00fa00','#FF69B4',  'orange', 'cyan', '#BF40BF', 'yellow','#A95C68','#6495ED']
plt.bar(iou_dice_balances, best_iou_dice_values, color=colors)
plt.title("Best IoU Dice Balances of Different Model Combinations")
plt.xlabel("Different model combinations")
plt.ylabel("IoU Dice Balances")

# Annotate thresholds above the bars
#for i, value in enumerate(best_iou_dice_values):
#    plt.text(i, value, f"{thresholds[i]:.2f}", ha='center', va='bottom')
for i, value in enumerate(best_iou_dice_values):
    plt.text(i, value,f"{thresholds[i]:.2f}", ha='center', va='bottom')

plt.xticks(rotation= 90)
plt.xlim(-0.7, len(iou_dice_balances) - 0.3)
# Set y-axis limits from 0.8 to 1
plt.ylim(0.7, 0.92)
plt.yticks(np.arange(0.7, 0.92, 0.01))
# Add a kink from 0 to 0.8
plt.axhline(0.7, color='k')

plt.show()
In [ ]:
ensemble_pred = (unet_pred + att_pred ) >= 0.8
In [ ]:
iou_scores = []
dice_coefficient = []

for i in range(len(scan_test)):
    iou_value = iou(ensemble_pred[i],mask_test[i])
    dice = dice_coeff(ensemble_pred[i],mask_test[i])
    iou_scores.append(iou_value)
    dice_coefficient.append(dice)

# Calculate the mean IoU and Dice coefficient scores
mean_iou_ensemble = np.nanmean(iou_scores)  # Use np.nanmean to handle NaN values
mean_dice_ensemble = np.mean(dice_coefficient)

print(f"Mean IoU: {mean_iou_ensemble}")
print(f"Mean Dice coefficient: {mean_dice_ensemble}")
Mean IoU: 0.7392150464903354
Mean Dice coefficient: 0.8212698101997375
In [ ]:
scan_test.shape[0]
Out[ ]:
65
In [ ]:
plt.figure(figsize=(15, 60))  # Adjust the figsize as needed

i = 0
x = 0
while i < 60:
    plt.subplot(15, 6, i + 1)
    plt.imshow(scan_test[x], 'gray')
    plt.title('Real Medic Image')
    plt.axis('off')

    plt.subplot(15, 6, i + 2)
    plt.imshow(mask_test[x], 'gray')
    plt.title('Ground Truth Img')
    plt.axis('off')

    plt.subplot(15, 6, i + 3)
    plt.imshow(ensemble_pred[x], 'gray')
    plt.title('Ensemble Result')
    plt.axis('off')

    plt.subplot(15, 6, i + 4)
    plt.imshow(unet_pred[x], 'gray')
    plt.title('Unet Result')
    plt.axis('off')

    plt.subplot(15, 6, i + 5)
    plt.imshow(att_pred[x], 'gray')
    plt.title('Attention UNet Result')
    plt.axis('off')

    x = np.random.randint(0,65)
    i += 6

plt.tight_layout()
plt.show()

Plotting results¶

In [ ]:
ensemble_pred = (deep3_pred + att_pred ) >= 0.9
In [ ]:
iou_scores = []
dice_coefficient = []

for i in range(len(scan_test)):
    iou_value = iou(ensemble_pred[i],mask_test[i])
    dice = dice_coeff(ensemble_pred[i],mask_test[i])
    iou_scores.append(iou_value)
    dice_coefficient.append(dice)

# Calculate the mean IoU and Dice coefficient scores
mean_iou_ensemble = np.nanmean(iou_scores)  # Use np.nanmean to handle NaN values
mean_dice_ensemble = np.mean(dice_coefficient)

print(f"Mean IoU: {mean_iou_ensemble}")
print(f"Mean Dice coefficient: {mean_dice_ensemble}")
Mean IoU: 0.8486169772270742
Mean Dice coefficient: 0.9102286100387573
In [ ]:
plt.figure(figsize=(15, 60))  # Adjust the figsize as needed

i = 0
x = 0
while i < 60:
    plt.subplot(15, 6, i + 1)
    plt.imshow(scan_test[x], 'gray')
    plt.title('Real Medic Image')
    plt.axis('off')

    plt.subplot(15, 6, i + 2)
    plt.imshow(mask_test[x], 'gray')
    plt.title('Ground Truth Img')
    plt.axis('off')

    plt.subplot(15, 6, i + 3)
    plt.imshow(ensemble_pred[x], 'gray')
    plt.title('Ensemble Result')
    plt.axis('off')

    plt.subplot(15, 6, i + 4)
    plt.imshow(deep3_pred[x], 'gray')
    plt.title('DeepLab Result')
    plt.axis('off')

    plt.subplot(15, 6, i + 5)
    plt.imshow(att_pred[x], 'gray')
    plt.title('Attention UNet Result')
    plt.axis('off')

    x += 1
    i += 6

plt.tight_layout()
plt.show()
In [ ]:
indices = range(len(iou_scores))

# Plot IoU scores
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.plot(indices, iou_scores, marker='o', linestyle='-')
plt.title('IoU Scores')
plt.xlabel('Image Number')
plt.ylabel('IoU')

# Plot Dice scores
plt.subplot(1, 2, 2)
plt.plot(indices, dice_coefficient, marker='o', linestyle='-')
plt.title('Dice Coefficient')
plt.xlabel('Image Number')
plt.ylabel('Dice Coefficient')

plt.tight_layout()
plt.show()

Final Results¶

In [ ]:
image_path = [
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/benign/benign (110).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/benign/benign (100).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/benign/benign (101).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/benign/benign (107).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/normal/normal (101).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/normal/normal (111).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/normal/normal (106).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/malignant/malignant (115).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/malignant/malignant (111).png',
    '/content/drive/MyDrive/Dataset_BUSI_with_GT/malignant/malignant (110).png',
]
In [ ]:
testX = []
for img in image_path :
    testX.append(img_to_array(Image.fromarray(cv2.resize(cv2.imread(img, cv2.IMREAD_GRAYSCALE), (128,128)))))
In [ ]:
testX = np.array(testX)
testX/= 255.0
In [ ]:
print(testX.shape)
print(f'Minimum : {testX.min()}')
print(f'Maximum : {testX.max()}')
(10, 128, 128, 1)
Minimum : 0.0
Maximum : 1.0
In [ ]:
plt.imshow(testX[0], 'gray')
plt.axis('off')
Out[ ]:
(-0.5, 127.5, 127.5, -0.5)
In [ ]:
predY = att_model.predict(testX)
1/1 [==============================] - 2s 2s/step
In [ ]:
print(predY.shape)
(10, 128, 128, 1)
In [ ]:
plt.imshow(predY[0], 'gray')
plt.axis('off')
Out[ ]:
(-0.5, 127.5, 127.5, -0.5)
In [ ]:
print(predY.min())
print(predY.max())
0.0
0.9999999
In [ ]:
colored_predY = []
for i in range(len(predY)):
    colored_img = greytocolor(predY[i])
    colored_predY.append(colored_img)
colored_predY = np.array(colored_predY)
In [ ]:
pred_label = vgg16_model.predict(colored_predY)
1/1 [==============================] - 1s 923ms/step
In [ ]:
pred_label.shape
Out[ ]:
(10, 3)
In [ ]:
print(np.argmax(pred_label, axis = 1))
plt.figure(figsize = (5,30))

i = 0
j = 0
while i < 20 :

    plt.subplot(10,2,i+1)
    plt.imshow (testX[j], 'gray')
    plt.title('Original Image', fontsize = 15)
    plt.axis('off')

    plt.subplot(10,2,i+2)
    plt.imshow (predY[j], 'gray')
    plt.title(f'{info[np.argmax(pred_label[j])]}', fontsize = 15)
    plt.axis('off')

    j += 1
    i += 2
plt.show()
[2 2 2 2 0 0 0 1 1 1]
In [ ]:
print(np.argmax(colored_predY, axis = 1))
[2 2 2 2 0 0 0 1 1 1]